From 32209a26356a560e289d5be987291557929b8555 Mon Sep 17 00:00:00 2001 From: Diego Tavares Date: Mon, 6 Jan 2025 16:14:12 -0800 Subject: [PATCH 1/3] Reformat all java files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Formatting was previously handled by code_style_ij.xml, which is not compatible with many newer IDEs. This PR changes formatting to Google Style, enforced by the Spotless Gradle plugin in conjunction with jdtls, which is compatible with VS Code and Eclipse. --- cuebot/build.gradle | 10 + cuebot/code_style_ij.xml | 67 - cuebot/jdtls.xml | 337 ++ .../spring/remoting/CueServerInterceptor.java | 60 +- .../common/spring/remoting/GrpcServer.java | 122 +- .../com/imageworks/spcue/ActionEntity.java | 141 +- .../com/imageworks/spcue/ActionInterface.java | 21 +- .../imageworks/spcue/AllocationEntity.java | 37 +- .../imageworks/spcue/AllocationInterface.java | 21 +- .../imageworks/spcue/BuildableDependency.java | 209 +- .../com/imageworks/spcue/BuildableJob.java | 166 +- .../com/imageworks/spcue/BuildableLayer.java | 64 +- .../com/imageworks/spcue/CommentDetail.java | 27 +- .../imageworks/spcue/CueGrpcException.java | 35 +- .../imageworks/spcue/CuebotApplication.java | 76 +- .../java/com/imageworks/spcue/DeedEntity.java | 31 +- .../imageworks/spcue/DepartmentEntity.java | 25 +- .../imageworks/spcue/DepartmentInterface.java | 21 +- .../com/imageworks/spcue/DependInterface.java | 19 +- .../spcue/DependencyManagerException.java | 35 +- .../com/imageworks/spcue/DispatchFrame.java | 91 +- .../com/imageworks/spcue/DispatchHost.java | 323 +- .../com/imageworks/spcue/DispatchJob.java | 31 +- .../java/com/imageworks/spcue/Entity.java | 93 +- .../imageworks/spcue/EntityCreationError.java | 57 +- .../com/imageworks/spcue/EntityException.java | 61 +- .../com/imageworks/spcue/EntityInterface.java | 24 +- .../spcue/EntityModificationError.java | 57 +- .../spcue/EntityNotFoundException.java | 33 +- .../imageworks/spcue/EntityRemovalError.java | 57 +- .../spcue/EntityRetrievalException.java | 49 +- .../imageworks/spcue/ExecutionSummary.java | 163 +- .../com/imageworks/spcue/FacilityEntity.java | 33 +- .../imageworks/spcue/FacilityInterface.java | 21 +- .../com/imageworks/spcue/FilterEntity.java | 51 +- .../com/imageworks/spcue/FilterInterface.java | 21 +- .../com/imageworks/spcue/FrameDetail.java | 45 +- .../com/imageworks/spcue/FrameEntity.java | 49 +- .../com/imageworks/spcue/FrameInterface.java | 37 +- .../imageworks/spcue/FrameStateTotals.java | 163 +- .../com/imageworks/spcue/GroupDetail.java | 65 +- .../com/imageworks/spcue/GroupEntity.java | 39 +- .../com/imageworks/spcue/GroupInterface.java | 21 +- .../spcue/HistoricalJobTransferException.java | 49 +- .../HostConfigurationErrorException.java | 51 +- .../java/com/imageworks/spcue/HostEntity.java | 121 +- .../com/imageworks/spcue/HostInterface.java | 21 +- .../java/com/imageworks/spcue/Inherit.java | 29 +- .../java/com/imageworks/spcue/JobDetail.java | 91 +- .../spcue/JobDispatchException.java | 47 +- .../java/com/imageworks/spcue/JobEntity.java | 49 +- .../com/imageworks/spcue/JobInterface.java | 21 +- .../imageworks/spcue/JobLaunchException.java | 49 +- .../com/imageworks/spcue/LayerDetail.java | 291 +- .../com/imageworks/spcue/LayerEntity.java | 61 +- .../com/imageworks/spcue/LayerInterface.java | 21 +- .../java/com/imageworks/spcue/LayerStats.java | 293 +- .../spcue/LightweightDependency.java | 53 +- .../com/imageworks/spcue/LimitEntity.java | 42 +- 
.../com/imageworks/spcue/LimitInterface.java | 20 +- .../imageworks/spcue/LocalHostAssignment.java | 412 +- .../com/imageworks/spcue/MaintenanceTask.java | 72 +- .../com/imageworks/spcue/MatcherEntity.java | 73 +- .../imageworks/spcue/MatcherInterface.java | 21 +- .../com/imageworks/spcue/MinimalHost.java | 59 +- .../com/imageworks/spcue/OwnerEntity.java | 27 +- .../com/imageworks/spcue/PointDetail.java | 53 +- .../com/imageworks/spcue/PointInterface.java | 21 +- .../com/imageworks/spcue/ProcInterface.java | 21 +- .../spcue/PrometheusMetricsCollector.java | 546 ++- .../java/com/imageworks/spcue/Redirect.java | 177 +- .../com/imageworks/spcue/ResourceUsage.java | 69 +- .../com/imageworks/spcue/ServiceEntity.java | 111 +- .../spcue/ServiceOverrideEntity.java | 27 +- .../java/com/imageworks/spcue/ShowEntity.java | 41 +- .../com/imageworks/spcue/ShowInterface.java | 21 +- .../com/imageworks/spcue/SortableShow.java | 173 +- .../java/com/imageworks/spcue/Source.java | 78 +- .../spcue/SpcueRuntimeException.java | 51 +- .../spcue/SpecBuilderException.java | 35 +- .../com/imageworks/spcue/StrandedCores.java | 49 +- .../imageworks/spcue/SubscriptionEntity.java | 55 +- .../spcue/SubscriptionInterface.java | 21 +- .../java/com/imageworks/spcue/TaskEntity.java | 100 +- .../com/imageworks/spcue/TaskInterface.java | 31 +- .../com/imageworks/spcue/ThreadStats.java | 47 +- .../imageworks/spcue/TrackitTaskDetail.java | 39 +- .../com/imageworks/spcue/VirtualProc.java | 485 +-- .../imageworks/spcue/config/AppConfig.java | 108 +- .../spcue/config/DatabaseEngine.java | 8 +- .../config/PostgresDatabaseCondition.java | 16 +- .../com/imageworks/spcue/dao/ActionDao.java | 34 +- .../imageworks/spcue/dao/AllocationDao.java | 175 +- .../com/imageworks/spcue/dao/BookingDao.java | 382 +- .../com/imageworks/spcue/dao/CommentDao.java | 166 +- .../com/imageworks/spcue/dao/DeedDao.java | 97 +- .../imageworks/spcue/dao/DepartmentDao.java | 107 +- .../com/imageworks/spcue/dao/DependDao.java | 366 +- .../imageworks/spcue/dao/DispatcherDao.java | 334 +- .../com/imageworks/spcue/dao/FacilityDao.java | 103 +- .../com/imageworks/spcue/dao/FilterDao.java | 53 +- .../com/imageworks/spcue/dao/FrameDao.java | 742 ++-- .../com/imageworks/spcue/dao/GroupDao.java | 420 +- .../imageworks/spcue/dao/HistoricalDao.java | 49 +- .../com/imageworks/spcue/dao/HostDao.java | 596 ++- .../java/com/imageworks/spcue/dao/JobDao.java | 914 ++-- .../com/imageworks/spcue/dao/LayerDao.java | 862 ++-- .../com/imageworks/spcue/dao/LimitDao.java | 106 +- .../imageworks/spcue/dao/MaintenanceDao.java | 83 +- .../com/imageworks/spcue/dao/MatcherDao.java | 35 +- .../spcue/dao/NestedWhiteboardDao.java | 49 +- .../com/imageworks/spcue/dao/OwnerDao.java | 110 +- .../com/imageworks/spcue/dao/PointDao.java | 214 +- .../com/imageworks/spcue/dao/ProcDao.java | 558 ++- .../com/imageworks/spcue/dao/RedirectDao.java | 95 +- .../com/imageworks/spcue/dao/ServiceDao.java | 39 +- .../com/imageworks/spcue/dao/ShowDao.java | 263 +- .../imageworks/spcue/dao/SubscriptionDao.java | 205 +- .../com/imageworks/spcue/dao/TaskDao.java | 216 +- .../com/imageworks/spcue/dao/TrackitDao.java | 37 +- .../imageworks/spcue/dao/WhiteboardDao.java | 1234 +++--- .../spcue/dao/criteria/CriteriaException.java | 49 +- .../spcue/dao/criteria/CriteriaInterface.java | 39 +- .../spcue/dao/criteria/Direction.java | 22 +- .../dao/criteria/FrameSearchFactory.java | 100 +- .../dao/criteria/FrameSearchInterface.java | 73 +- .../spcue/dao/criteria/HostSearchFactory.java | 46 +- 
.../dao/criteria/HostSearchInterface.java | 27 +- .../spcue/dao/criteria/JobSearchFactory.java | 66 +- .../dao/criteria/JobSearchInterface.java | 32 +- .../imageworks/spcue/dao/criteria/Phrase.java | 60 +- .../spcue/dao/criteria/ProcSearchFactory.java | 68 +- .../dao/criteria/ProcSearchInterface.java | 56 +- .../imageworks/spcue/dao/criteria/Sort.java | 55 +- .../spcue/dao/criteria/postgres/Criteria.java | 488 +-- .../dao/criteria/postgres/FrameSearch.java | 356 +- .../dao/criteria/postgres/HostSearch.java | 68 +- .../dao/criteria/postgres/JobSearch.java | 82 +- .../dao/criteria/postgres/ProcSearch.java | 180 +- .../spcue/dao/postgres/ActionDaoJdbc.java | 222 +- .../spcue/dao/postgres/AllocationDaoJdbc.java | 333 +- .../spcue/dao/postgres/BookingDaoJdbc.java | 682 ++- .../spcue/dao/postgres/CommentDaoJdbc.java | 207 +- .../spcue/dao/postgres/DeedDaoJdbc.java | 152 +- .../spcue/dao/postgres/DepartmentDaoJdbc.java | 107 +- .../spcue/dao/postgres/DependDaoJdbc.java | 1054 ++--- .../spcue/dao/postgres/DispatchQuery.java | 19 +- .../spcue/dao/postgres/DispatcherDaoJdbc.java | 1000 ++--- .../spcue/dao/postgres/FacilityDaoJdbc.java | 113 +- .../spcue/dao/postgres/FilterDaoJdbc.java | 271 +- .../spcue/dao/postgres/FrameDaoJdbc.java | 1886 ++++---- .../spcue/dao/postgres/GroupDaoJdbc.java | 726 ++-- .../spcue/dao/postgres/HistoricalDaoJdbc.java | 53 +- .../spcue/dao/postgres/HostDaoJdbc.java | 1205 +++--- .../spcue/dao/postgres/JobDaoJdbc.java | 1766 ++++---- .../spcue/dao/postgres/LayerDaoJdbc.java | 1458 +++---- .../spcue/dao/postgres/LimitDaoJdbc.java | 171 +- .../dao/postgres/MaintenanceDaoJdbc.java | 94 +- .../spcue/dao/postgres/MatcherDaoJdbc.java | 136 +- .../dao/postgres/NestedWhiteboardDaoJdbc.java | 819 ++-- .../spcue/dao/postgres/OwnerDaoJdbc.java | 166 +- .../spcue/dao/postgres/PointDaoJdbc.java | 314 +- .../spcue/dao/postgres/ProcDaoJdbc.java | 1403 +++--- .../spcue/dao/postgres/RedirectDaoJdbc.java | 137 +- .../spcue/dao/postgres/ServiceDaoJdbc.java | 439 +- .../spcue/dao/postgres/ShowDaoJdbc.java | 421 +- .../dao/postgres/SubscriptionDaoJdbc.java | 311 +- .../spcue/dao/postgres/TaskDaoJdbc.java | 359 +- .../spcue/dao/postgres/WhiteboardDaoJdbc.java | 3779 +++++++---------- .../spcue/depend/AbstractDepend.java | 102 +- .../com/imageworks/spcue/depend/Depend.java | 21 +- .../spcue/depend/DependCreationVisitor.java | 149 +- .../spcue/depend/DependException.java | 49 +- .../spcue/depend/DependVisitor.java | 51 +- .../imageworks/spcue/depend/FrameByFrame.java | 101 +- .../imageworks/spcue/depend/FrameOnFrame.java | 125 +- .../imageworks/spcue/depend/FrameOnJob.java | 89 +- .../imageworks/spcue/depend/FrameOnLayer.java | 99 +- .../imageworks/spcue/depend/JobOnFrame.java | 90 +- .../com/imageworks/spcue/depend/JobOnJob.java | 89 +- .../imageworks/spcue/depend/JobOnLayer.java | 84 +- .../imageworks/spcue/depend/LayerOnFrame.java | 96 +- .../imageworks/spcue/depend/LayerOnJob.java | 89 +- .../imageworks/spcue/depend/LayerOnLayer.java | 103 +- .../spcue/depend/LayerOnSimFrame.java | 104 +- .../spcue/depend/PreviousFrame.java | 88 +- .../spcue/depend/QueueDependOperation.java | 39 +- .../spcue/dispatcher/AbstractDispatcher.java | 397 +- .../spcue/dispatcher/BookingQueue.java | 162 +- .../spcue/dispatcher/CoreUnitDispatcher.java | 827 ++-- .../spcue/dispatcher/DispatchQueue.java | 144 +- .../DispatchQueueTaskRejectionException.java | 49 +- .../spcue/dispatcher/DispatchSupport.java | 998 +++-- .../dispatcher/DispatchSupportService.java | 1189 +++--- .../spcue/dispatcher/Dispatcher.java | 383 +- 
.../spcue/dispatcher/DispatcherException.java | 35 +- .../dispatcher/FrameCompleteHandler.java | 1223 +++--- .../dispatcher/FrameLookupException.java | 49 +- .../dispatcher/FrameReservationException.java | 49 +- .../spcue/dispatcher/HealthyThreadPool.java | 387 +- .../spcue/dispatcher/HostReportHandler.java | 2253 ++++------ .../spcue/dispatcher/HostReportQueue.java | 202 +- .../spcue/dispatcher/JobLookupException.java | 49 +- .../spcue/dispatcher/LocalDispatcher.java | 674 ++- .../spcue/dispatcher/QueueHealthCheck.java | 4 +- .../spcue/dispatcher/QueueRejectCounter.java | 41 +- .../spcue/dispatcher/RedirectManager.java | 705 ++- .../spcue/dispatcher/ResourceContainer.java | 61 +- .../ResourceCreationFailureException.java | 49 +- .../ResourceDuplicationFailureException.java | 52 +- .../ResourceReleaseFailureException.java | 49 +- .../ResourceReservationFailureException.java | 49 +- .../dispatcher/RqdRetryReportException.java | 49 +- .../dispatcher/commands/DispatchBookHost.java | 199 +- .../commands/DispatchBookHostLocal.java | 49 +- .../commands/DispatchCommandTemplate.java | 38 +- .../commands/DispatchDropDepends.java | 108 +- .../commands/DispatchEatFrames.java | 57 +- .../commands/DispatchHandleHostReport.java | 85 +- .../commands/DispatchJobComplete.java | 62 +- .../commands/DispatchKillFrames.java | 56 +- .../commands/DispatchKillProcs.java | 62 +- .../commands/DispatchLaunchJob.java | 51 +- .../dispatcher/commands/DispatchMoveJobs.java | 63 +- .../commands/DispatchNextFrame.java | 55 +- .../commands/DispatchReorderFrames.java | 94 +- .../commands/DispatchRetryFrames.java | 57 +- .../commands/DispatchRqdKillFrame.java | 75 +- .../commands/DispatchRqdKillFrameMemory.java | 91 +- .../commands/DispatchSatisfyDepends.java | 99 +- .../DispatchShutdownJobIfCompleted.java | 60 +- .../commands/DispatchStaggerFrames.java | 90 +- .../dispatcher/commands/KeyRunnable.java | 39 +- .../commands/ManageReparentHosts.java | 60 +- .../com/imageworks/spcue/rqd/RqdClient.java | 147 +- .../spcue/rqd/RqdClientException.java | 49 +- .../imageworks/spcue/rqd/RqdClientGrpc.java | 310 +- .../imageworks/spcue/servant/CueStatic.java | 259 +- .../spcue/servant/ManageAction.java | 112 +- .../spcue/servant/ManageAllocation.java | 456 +- .../spcue/servant/ManageComment.java | 77 +- .../imageworks/spcue/servant/ManageDeed.java | 118 +- .../spcue/servant/ManageDepartment.java | 334 +- .../spcue/servant/ManageDepend.java | 150 +- .../spcue/servant/ManageFacility.java | 105 +- .../spcue/servant/ManageFilter.java | 460 +- .../imageworks/spcue/servant/ManageFrame.java | 678 ++- .../imageworks/spcue/servant/ManageGroup.java | 562 ++- .../imageworks/spcue/servant/ManageHost.java | 620 ++- .../imageworks/spcue/servant/ManageJob.java | 1702 ++++---- .../imageworks/spcue/servant/ManageLayer.java | 967 +++-- .../imageworks/spcue/servant/ManageLimit.java | 158 +- .../spcue/servant/ManageMatcher.java | 110 +- .../imageworks/spcue/servant/ManageOwner.java | 219 +- .../imageworks/spcue/servant/ManageProc.java | 488 ++- .../spcue/servant/ManageRenderPartition.java | 74 +- .../spcue/servant/ManageService.java | 209 +- .../spcue/servant/ManageServiceOverride.java | 105 +- .../imageworks/spcue/servant/ManageShow.java | 791 ++-- .../spcue/servant/ManageSubscription.java | 187 +- .../imageworks/spcue/servant/ManageTask.java | 92 +- .../spcue/servant/RqdReportStatic.java | 89 +- .../imageworks/spcue/servant/ServantUtil.java | 96 +- .../spcue/service/AdminManager.java | 173 +- .../spcue/service/AdminManagerService.java | 591 ++- 
.../spcue/service/BookingManager.java | 241 +- .../spcue/service/BookingManagerService.java | 373 +- .../spcue/service/CommentManager.java | 144 +- .../spcue/service/CommentManagerService.java | 159 +- .../spcue/service/DepartmentManager.java | 369 +- .../service/DepartmentManagerService.java | 466 +- .../spcue/service/DependManager.java | 342 +- .../spcue/service/DependManagerService.java | 1144 +++-- .../spcue/service/EmailSupport.java | 508 ++- .../spcue/service/FilterManager.java | 89 +- .../spcue/service/FilterManagerService.java | 845 ++-- .../spcue/service/GroupManager.java | 188 +- .../spcue/service/GroupManagerService.java | 362 +- .../spcue/service/HistoricalManager.java | 43 +- .../service/HistoricalManagerService.java | 64 +- .../spcue/service/HistoricalSupport.java | 61 +- .../imageworks/spcue/service/HostManager.java | 432 +- .../spcue/service/HostManagerService.java | 726 ++-- .../imageworks/spcue/service/JmsMover.java | 128 +- .../imageworks/spcue/service/JobLauncher.java | 389 +- .../imageworks/spcue/service/JobManager.java | 929 ++-- .../spcue/service/JobManagerService.java | 1189 +++--- .../spcue/service/JobManagerSupport.java | 1074 +++-- .../com/imageworks/spcue/service/JobSpec.java | 1692 ++++---- .../spcue/service/LocalBookingSupport.java | 246 +- .../service/MaintenanceManagerSupport.java | 438 +- .../spcue/service/OwnerManager.java | 166 +- .../spcue/service/OwnerManagerService.java | 188 +- .../spcue/service/RedirectService.java | 205 +- .../spcue/service/ServiceManager.java | 39 +- .../spcue/service/ServiceManagerService.java | 157 +- .../imageworks/spcue/service/Whiteboard.java | 37 +- .../spcue/service/WhiteboardService.java | 787 ++-- .../spcue/servlet/HealthCheckServlet.java | 212 +- .../spcue/servlet/JobLaunchServlet.java | 96 +- .../com/imageworks/spcue/util/Convert.java | 73 +- .../spcue/util/CueExceptionUtil.java | 73 +- .../com/imageworks/spcue/util/CueUtil.java | 623 ++- .../com/imageworks/spcue/util/FrameRange.java | 304 +- .../com/imageworks/spcue/util/FrameSet.java | 291 +- .../com/imageworks/spcue/util/JobLogUtil.java | 85 +- .../com/imageworks/spcue/util/SqlUtil.java | 243 +- .../com/imageworks/spcue/util/TagUtil.java | 59 +- .../spcue/config/TestAppConfig.java | 45 +- .../spcue/test/AssumingPostgresEngine.java | 72 +- .../imageworks/spcue/test/EntityTests.java | 105 +- .../spcue/test/TestDatabaseSetupPostgres.java | 88 +- .../spcue/test/TransactionalTest.java | 27 +- .../test/dao/criteria/FrameSearchTests.java | 390 +- .../test/dao/criteria/HostSearchTests.java | 134 +- .../test/dao/criteria/JobSearchTests.java | 120 +- .../test/dao/criteria/ProcSearchTests.java | 350 +- .../test/dao/postgres/ActionDaoTests.java | 326 +- .../test/dao/postgres/AllocationDaoTests.java | 325 +- .../test/dao/postgres/BookingDaoTests.java | 689 ++- .../test/dao/postgres/CommentDaoTests.java | 354 +- .../spcue/test/dao/postgres/DeedDaoTests.java | 236 +- .../test/dao/postgres/DepartmentDaoTests.java | 154 +- .../test/dao/postgres/DependDaoTests.java | 756 ++-- .../dao/postgres/DispatcherDaoFifoTests.java | 339 +- .../test/dao/postgres/DispatcherDaoTests.java | 920 ++-- .../test/dao/postgres/FacilityDaoTests.java | 77 +- .../test/dao/postgres/FilterDaoTests.java | 501 +-- .../test/dao/postgres/FrameDaoTests.java | 1206 +++--- .../test/dao/postgres/GroupDaoTests.java | 806 ++-- .../test/dao/postgres/HistoricalDaoTests.java | 91 +- .../spcue/test/dao/postgres/HostDaoTests.java | 1069 +++-- .../spcue/test/dao/postgres/JobDaoTests.java | 1315 +++--- 
.../test/dao/postgres/LayerDaoTests.java | 1369 +++--- .../test/dao/postgres/LimitDaoTests.java | 207 +- .../dao/postgres/MaintenanceDaoTests.java | 84 +- .../test/dao/postgres/MatcherDaoTests.java | 200 +- .../postgres/NestedWhiteboardDaoTests.java | 69 +- .../test/dao/postgres/OwnerDaoTests.java | 188 +- .../test/dao/postgres/PointDaoTests.java | 207 +- .../spcue/test/dao/postgres/ProcDaoTests.java | 1515 ++++--- .../test/dao/postgres/ServiceDaoTests.java | 405 +- .../spcue/test/dao/postgres/ShowDaoTests.java | 375 +- .../dao/postgres/SubscriptionDaoTests.java | 424 +- .../spcue/test/dao/postgres/TaskDaoTests.java | 462 +- .../test/dao/postgres/WhiteboardDaoTests.java | 2423 ++++++----- .../CoreUnitDispatcherGpuJobTests.java | 310 +- .../CoreUnitDispatcherGpuTests.java | 360 +- .../CoreUnitDispatcherGpusJobTests.java | 429 +- .../dispatcher/CoreUnitDispatcherTests.java | 327 +- .../test/dispatcher/DispatchSupportTests.java | 232 +- .../dispatcher/FrameCompleteHandlerTests.java | 777 ++-- .../test/dispatcher/HistoryControlTests.java | 274 +- .../dispatcher/HostReportHandlerGpuTests.java | 149 +- .../dispatcher/HostReportHandlerTests.java | 1040 ++--- .../test/dispatcher/LocalDispatcherTests.java | 618 ++- .../test/dispatcher/RedirectManagerTests.java | 585 ++- .../test/dispatcher/StrandedCoreTests.java | 189 +- .../test/dispatcher/TestBookingQueue.java | 149 +- .../test/servant/FakeStreamObserver.java | 33 +- .../test/servant/ManageAllocationTests.java | 197 +- .../spcue/test/servant/ManageFrameTests.java | 180 +- .../spcue/test/service/AdminManagerTests.java | 302 +- .../test/service/BookingManagerTests.java | 664 ++- .../test/service/CommentManagerTests.java | 73 +- .../test/service/DepartmentManagerTests.java | 120 +- .../service/DependManagerChunkingTests.java | 357 +- .../test/service/DependManagerTests.java | 878 ++-- .../spcue/test/service/EmailSupportTests.java | 125 +- .../test/service/FilterManagerTests.java | 652 ++- .../spcue/test/service/GroupManagerTests.java | 149 +- .../spcue/test/service/HostManagerTests.java | 287 +- .../spcue/test/service/JobManagerTests.java | 820 ++-- .../spcue/test/service/JobSpecTests.java | 168 +- .../MaintenanceManagerSupportTests.java | 50 +- .../spcue/test/service/OwnerManagerTests.java | 278 +- .../test/service/ServiceManagerTests.java | 284 +- .../spcue/test/service/WhiteboardTests.java | 68 +- .../spcue/test/util/CoreSaturationTests.java | 68 +- .../spcue/test/util/CoreSpanTests.java | 260 +- .../spcue/test/util/CueUtilTester.java | 250 +- .../spcue/test/util/FrameRangeTests.java | 278 +- .../spcue/test/util/FrameSetTests.java | 137 +- .../spcue/test/util/JobLogUtilTests.java | 111 +- .../spcue/test/util/SqlUtilTests.java | 12 +- 385 files changed, 50619 insertions(+), 57836 deletions(-) delete mode 100644 cuebot/code_style_ij.xml create mode 100644 cuebot/jdtls.xml diff --git a/cuebot/build.gradle b/cuebot/build.gradle index ce6d2a3b7..815a09695 100644 --- a/cuebot/build.gradle +++ b/cuebot/build.gradle @@ -7,6 +7,7 @@ plugins { id('com.google.protobuf') version "0.9.1" id('jacoco') id('org.sonarqube') version "2.8" + id('com.diffplug.spotless') version "5.16.0" } sourceCompatibility = 11 @@ -171,3 +172,12 @@ test { } } } + +spotless { + java { + targetExclude 'src/compiled_protobuf/**' + toggleOffOn() + eclipse().configFile('jdtls.xml') + indentWithSpaces(4) + } +} diff --git a/cuebot/code_style_ij.xml b/cuebot/code_style_ij.xml deleted file mode 100644 index caa706a34..000000000 --- a/cuebot/code_style_ij.xml +++ /dev/null @@ -1,67 +0,0 @@ 
- - \ No newline at end of file diff --git a/cuebot/jdtls.xml b/cuebot/jdtls.xml new file mode 100644 index 000000000..7bb6804eb --- /dev/null +++ b/cuebot/jdtls.xml @@ -0,0 +1,337 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java b/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java index 31ebeb12e..9939785b5 100644 --- a/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java +++ b/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java @@ -7,43 +7,39 @@ import io.grpc.ServerCallHandler; import io.grpc.ServerInterceptor; import io.grpc.Status; -import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; - +import org.apache.logging.log4j.Logger; public class CueServerInterceptor implements ServerInterceptor { - private static final Logger logger = LogManager.getLogger(CueServerInterceptor.class); - private static final Logger accessLogger = LogManager.getLogger("API"); + private static final Logger logger = LogManager.getLogger(CueServerInterceptor.class); + private static final Logger accessLogger = LogManager.getLogger("API"); - @Override - public ServerCall.Listener interceptCall( - ServerCall serverCall, Metadata metadata, - ServerCallHandler serverCallHandler) { - accessLogger.info("gRPC [" + - serverCall.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR) + - "]: " + serverCall.getMethodDescriptor().getFullMethodName()); + @Override + public ServerCall.Listener interceptCall(ServerCall serverCall, + Metadata metadata, ServerCallHandler serverCallHandler) { + accessLogger.info("gRPC [" + serverCall.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR) + + "]: " + serverCall.getMethodDescriptor().getFullMethodName()); - ServerCall.Listener delegate = serverCallHandler.startCall(serverCall, metadata); - return new SimpleForwardingServerCallListener(delegate) { - @Override - public void onHalfClose() { - try { - super.onHalfClose(); - } catch (Exception e) { - logger.error("Caught an unexpected error.", e); - serverCall.close(Status.INTERNAL - .withCause(e) - .withDescription(e.toString() + "\n" + e.getMessage()), - new Metadata()); - } - } + ServerCall.Listener delegate = serverCallHandler.startCall(serverCall, metadata); + return new SimpleForwardingServerCallListener(delegate) { + @Override + public void onHalfClose() { + try { + super.onHalfClose(); + } catch (Exception e) { + logger.error("Caught an unexpected error.", e); + serverCall.close( + Status.INTERNAL.withCause(e).withDescription(e.toString() + "\n" + e.getMessage()), + new Metadata()); + } + } - @Override - public void onMessage(ReqT request) { - accessLogger.info("Request Data: " + request); - super.onMessage(request); - } - }; - } + @Override + public void onMessage(ReqT request) { + 
accessLogger.info("Request Data: " + request); + super.onMessage(request); + } + }; + } } diff --git a/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java b/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java index a5038f82c..66c49de47 100644 --- a/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java +++ b/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java @@ -38,76 +38,74 @@ import com.imageworks.spcue.servant.ManageTask; import com.imageworks.spcue.servant.RqdReportStatic; - public class GrpcServer implements ApplicationContextAware { - private static final Logger logger = LogManager.getLogger(GrpcServer.class); + private static final Logger logger = LogManager.getLogger(GrpcServer.class); - private static final String DEFAULT_NAME = "CueGrpcServer"; - private static final String DEFAULT_PORT = "8443"; - private static final int DEFAULT_MAX_MESSAGE_BYTES = 104857600; + private static final String DEFAULT_NAME = "CueGrpcServer"; + private static final String DEFAULT_PORT = "8443"; + private static final int DEFAULT_MAX_MESSAGE_BYTES = 104857600; - private String name; - private int port; - private int maxMessageBytes; - private Server server; - private ApplicationContext applicationContext; + private String name; + private int port; + private int maxMessageBytes; + private Server server; + private ApplicationContext applicationContext; - public GrpcServer() { - this(DEFAULT_NAME, DEFAULT_PORT, new Properties(), DEFAULT_MAX_MESSAGE_BYTES); - } + public GrpcServer() { + this(DEFAULT_NAME, DEFAULT_PORT, new Properties(), DEFAULT_MAX_MESSAGE_BYTES); + } - public GrpcServer(String name, String port, Properties props, Integer maxMessageBytes) { - logger.info("Setting up gRPC server..."); - this.name = name; - this.port = Integer.parseInt(port); - this.maxMessageBytes = maxMessageBytes; - } + public GrpcServer(String name, String port, Properties props, Integer maxMessageBytes) { + logger.info("Setting up gRPC server..."); + this.name = name; + this.port = Integer.parseInt(port); + this.maxMessageBytes = maxMessageBytes; + } - public void shutdown() { - if (!server.isShutdown()) { - logger.info("gRPC server shutting down on " + this.name + " at port " + this.port); - server.shutdown(); - } + public void shutdown() { + if (!server.isShutdown()) { + logger.info("gRPC server shutting down on " + this.name + " at port " + this.port); + server.shutdown(); } + } - public void start() throws IOException { - server = ServerBuilder - .forPort(this.port) - .addService(applicationContext.getBean("rqdReportStatic", RqdReportStatic.class)) - .addService(applicationContext.getBean("cueStaticServant", CueStatic.class)) - .addService(applicationContext.getBean("manageAction", ManageAction.class)) - .addService(applicationContext.getBean("manageAllocation", ManageAllocation.class)) - .addService(applicationContext.getBean("manageComment", ManageComment.class)) - .addService(applicationContext.getBean("manageDeed", ManageDeed.class)) - .addService(applicationContext.getBean("manageDepartment", ManageDepartment.class)) - .addService(applicationContext.getBean("manageDepend", ManageDepend.class)) - .addService(applicationContext.getBean("manageFacility", ManageFacility.class)) - .addService(applicationContext.getBean("manageFilter", ManageFilter.class)) - .addService(applicationContext.getBean("manageFrame", ManageFrame.class)) - .addService(applicationContext.getBean("manageGroup", ManageGroup.class)) - 
.addService(applicationContext.getBean("manageHost", ManageHost.class)) - .addService(applicationContext.getBean("manageJob", ManageJob.class)) - .addService(applicationContext.getBean("manageLayer", ManageLayer.class)) - .addService(applicationContext.getBean("manageLimit", ManageLimit.class)) - .addService(applicationContext.getBean("manageMatcher", ManageMatcher.class)) - .addService(applicationContext.getBean("manageOwner", ManageOwner.class)) - .addService(applicationContext.getBean("manageProc", ManageProc.class)) - .addService(applicationContext.getBean("manageRenderPartition", ManageRenderPartition.class)) - .addService(applicationContext.getBean("manageService", ManageService.class)) - .addService(applicationContext.getBean("manageServiceOverride", ManageServiceOverride.class)) - .addService(applicationContext.getBean("manageShow", ManageShow.class)) - .addService(applicationContext.getBean("manageSubscription", ManageSubscription.class)) - .addService(applicationContext.getBean("manageTask", ManageTask.class)) - .maxInboundMessageSize(maxMessageBytes) - .intercept(new CueServerInterceptor()) - .build(); - server.start(); - logger.info("gRPC server started on " + this.name + " at port " + this.port + " !"); - } + public void start() throws IOException { + server = ServerBuilder.forPort(this.port) + .addService(applicationContext.getBean("rqdReportStatic", RqdReportStatic.class)) + .addService(applicationContext.getBean("cueStaticServant", CueStatic.class)) + .addService(applicationContext.getBean("manageAction", ManageAction.class)) + .addService(applicationContext.getBean("manageAllocation", ManageAllocation.class)) + .addService(applicationContext.getBean("manageComment", ManageComment.class)) + .addService(applicationContext.getBean("manageDeed", ManageDeed.class)) + .addService(applicationContext.getBean("manageDepartment", ManageDepartment.class)) + .addService(applicationContext.getBean("manageDepend", ManageDepend.class)) + .addService(applicationContext.getBean("manageFacility", ManageFacility.class)) + .addService(applicationContext.getBean("manageFilter", ManageFilter.class)) + .addService(applicationContext.getBean("manageFrame", ManageFrame.class)) + .addService(applicationContext.getBean("manageGroup", ManageGroup.class)) + .addService(applicationContext.getBean("manageHost", ManageHost.class)) + .addService(applicationContext.getBean("manageJob", ManageJob.class)) + .addService(applicationContext.getBean("manageLayer", ManageLayer.class)) + .addService(applicationContext.getBean("manageLimit", ManageLimit.class)) + .addService(applicationContext.getBean("manageMatcher", ManageMatcher.class)) + .addService(applicationContext.getBean("manageOwner", ManageOwner.class)) + .addService(applicationContext.getBean("manageProc", ManageProc.class)) + .addService( + applicationContext.getBean("manageRenderPartition", ManageRenderPartition.class)) + .addService(applicationContext.getBean("manageService", ManageService.class)) + .addService( + applicationContext.getBean("manageServiceOverride", ManageServiceOverride.class)) + .addService(applicationContext.getBean("manageShow", ManageShow.class)) + .addService(applicationContext.getBean("manageSubscription", ManageSubscription.class)) + .addService(applicationContext.getBean("manageTask", ManageTask.class)) + .maxInboundMessageSize(maxMessageBytes).intercept(new CueServerInterceptor()).build(); + server.start(); + logger.info("gRPC server started on " + this.name + " at port " + this.port + " !"); + } - @Override - public void 
setApplicationContext(ApplicationContext applicationContext) throws BeansException { - this.applicationContext = applicationContext; - } + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java index 95aff5d34..b04fb5fa5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.filter.Action; @@ -25,76 +21,75 @@ public class ActionEntity extends Entity implements ActionInterface { - public String filterId; - public String showId; - - public ActionType type; - public ActionValueType valueType; - public String stringValue; - public long intValue; - public boolean booleanValue; - public String groupValue; - public float floatValue; - - public ActionEntity() { - this.name = null; + public String filterId; + public String showId; + + public ActionType type; + public ActionValueType valueType; + public String stringValue; + public long intValue; + public boolean booleanValue; + public String groupValue; + public float floatValue; + + public ActionEntity() { + this.name = null; + } + + public static ActionEntity build(Action data) { + ActionEntity entity = new ActionEntity(); + if (data.getGroupValue() != null) { + entity.groupValue = data.getGroupValue(); } - - public static ActionEntity build(Action data) { - ActionEntity entity = new ActionEntity(); - if (data.getGroupValue() != null) { - entity.groupValue = data.getGroupValue(); - } - entity.stringValue = data.getStringValue(); - entity.booleanValue = data.getBooleanValue(); - entity.intValue = data.getIntegerValue(); - entity.floatValue = data.getFloatValue(); - entity.name = ""; - entity.type = data.getType(); - entity.valueType = data.getValueType(); - return entity; - } - - public static ActionEntity build(FilterInterface filter, Action data) { - ActionEntity entity = build(data); - entity.filterId = filter.getFilterId(); - entity.showId = filter.getShowId(); - return entity; + entity.stringValue = data.getStringValue(); + entity.booleanValue = data.getBooleanValue(); + entity.intValue = 
data.getIntegerValue(); + entity.floatValue = data.getFloatValue(); + entity.name = ""; + entity.type = data.getType(); + entity.valueType = data.getValueType(); + return entity; + } + + public static ActionEntity build(FilterInterface filter, Action data) { + ActionEntity entity = build(data); + entity.filterId = filter.getFilterId(); + entity.showId = filter.getShowId(); + return entity; + } + + public static ActionEntity build(FilterInterface filter, Action data, String id) { + ActionEntity action = build(filter, data); + action.id = id; + if (action.isNew()) { + throw new SpcueRuntimeException("the action has not been created yet"); } + return action; - public static ActionEntity build(FilterInterface filter, Action data, String id) { - ActionEntity action = build(filter, data); - action.id = id; - if (action.isNew()) { - throw new SpcueRuntimeException("the action has not been created yet"); - } - return action; + } - } + public String getId() { + return id; + } - public String getId() { - return id; - } + public String getName() { + return null; + } - public String getName() { - return null; - } + public String getActionId() { + return id; + } - public String getActionId() { - return id; + public String getFilterId() { + if (filterId == null) { + throw new SpcueRuntimeException( + "Trying to get a filterId from a ActityEntity created without a filter"); } + return filterId; + } - public String getFilterId() { - if (filterId == null){ - throw new SpcueRuntimeException( - "Trying to get a filterId from a ActityEntity created without a filter"); - } - return filterId; - } - - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java b/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java index 06dacf924..4f28e2c54 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public interface ActionInterface extends FilterInterface { - public String getActionId(); + public String getActionId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java b/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java index 6b358612e..27b492ac1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java @@ -2,36 +2,31 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class AllocationEntity extends Entity implements AllocationInterface { - public String tag; - public String facilityId; + public String tag; + public String facilityId; - public String getAllocationId() { - return id; - } + public String getAllocationId() { + return id; + } - @Override - public String getFacilityId() { - return facilityId; - } + @Override + public String getFacilityId() { + return facilityId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java b/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java index d21121242..9e2669d15 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java @@ -2,25 +2,20 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface AllocationInterface extends EntityInterface, FacilityInterface { - public String getAllocationId(); + public String getAllocationId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java index 22ecd73a9..d3eb94b07 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java @@ -2,122 +2,117 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.depend.DependType; public class BuildableDependency { - public DependType type; - public boolean anyFrame = false; - public boolean launchDepend = false; - - public String dependErJobName; - public String dependErLayerName; - public String dependErFrameName; - - public String dependOnJobName; - public String dependOnLayerName; - public String dependOnFrameName; - - public boolean isAnyFrame() { - return anyFrame; - } - - public void setAnyFrame(boolean anyFrame) { - this.anyFrame = anyFrame; - } - - public String getDependErFrameName() { - return dependErFrameName; - } - - public void setDependErFrameName(String dependErFrameName) { - this.dependErFrameName = dependErFrameName; - } - - public String getDependErJobName() { - return dependErJobName; - } - - public void setDependErJobName(String dependErJobName) { - this.dependErJobName = dependErJobName; - } - - public String getDependErLayerName() { - return dependErLayerName; - } - - public void setDependErLayerName(String dependErLayerName) { - this.dependErLayerName = dependErLayerName; - } - - public String getDependOnFrameName() { - return dependOnFrameName; - } - - public void setDependOnFrameName(String dependOnFrameName) { - this.dependOnFrameName = dependOnFrameName; - } - - public String getDependOnJobName() { - return dependOnJobName; - } - - public void setDependOnJobName(String dependOnJobName) { - this.dependOnJobName = dependOnJobName; - } - - public String getDependOnLayerName() { - return dependOnLayerName; - } - - public void setDependOnLayerName(String dependOnLayerName) { - this.dependOnLayerName = dependOnLayerName; - } - - public DependType getType() { - return type; - } - - public void setType(DependType type) { - this.type = type; - } - - public String toString() { - StringBuilder sb = new StringBuilder(1024); - sb.append("Depend Type: " + type.toString() + "\n"); - sb.append("Depend on job: " + dependErJobName + "\n"); - sb.append("Depend on layer: " + dependOnLayerName + "\n"); - sb.append("Depend on frame: " + dependOnFrameName + "\n"); - sb.append("Depend er job: " + dependOnJobName + "\n"); - sb.append("Depend er layer: " + dependErLayerName + "\n"); - sb.append("Depend er frame: " + dependErFrameName + "\n"); - return sb.toString(); - } - - public boolean isLaunchDepend() { - return launchDepend; - } - - public void setLaunchDepend(boolean launchDepend) { - this.launchDepend = launchDepend; - } + public DependType type; + public boolean anyFrame = false; + public boolean launchDepend = false; + + public String dependErJobName; + public String dependErLayerName; + public String dependErFrameName; + + public String dependOnJobName; + public String dependOnLayerName; + public String dependOnFrameName; + + public boolean isAnyFrame() { + return anyFrame; + } + + public void setAnyFrame(boolean anyFrame) { + this.anyFrame = anyFrame; + } + + public String getDependErFrameName() { + return dependErFrameName; + } + + public void setDependErFrameName(String dependErFrameName) { + this.dependErFrameName = dependErFrameName; + } + + public String getDependErJobName() { + return dependErJobName; + } + + public void setDependErJobName(String dependErJobName) { + this.dependErJobName = dependErJobName; + } + + public String getDependErLayerName() { + return dependErLayerName; + } + + public void setDependErLayerName(String dependErLayerName) { + this.dependErLayerName = dependErLayerName; + } + + public String getDependOnFrameName() { + return 
dependOnFrameName; + } + + public void setDependOnFrameName(String dependOnFrameName) { + this.dependOnFrameName = dependOnFrameName; + } + + public String getDependOnJobName() { + return dependOnJobName; + } + + public void setDependOnJobName(String dependOnJobName) { + this.dependOnJobName = dependOnJobName; + } + + public String getDependOnLayerName() { + return dependOnLayerName; + } + + public void setDependOnLayerName(String dependOnLayerName) { + this.dependOnLayerName = dependOnLayerName; + } + + public DependType getType() { + return type; + } + + public void setType(DependType type) { + this.type = type; + } + + public String toString() { + StringBuilder sb = new StringBuilder(1024); + sb.append("Depend Type: " + type.toString() + "\n"); + sb.append("Depend on job: " + dependErJobName + "\n"); + sb.append("Depend on layer: " + dependOnLayerName + "\n"); + sb.append("Depend on frame: " + dependOnFrameName + "\n"); + sb.append("Depend er job: " + dependOnJobName + "\n"); + sb.append("Depend er layer: " + dependErLayerName + "\n"); + sb.append("Depend er frame: " + dependErFrameName + "\n"); + return sb.toString(); + } + + public boolean isLaunchDepend() { + return launchDepend; + } + + public void setLaunchDepend(boolean launchDepend) { + this.launchDepend = launchDepend; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java index 2c9d213a9..e71ec29c9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.util.ArrayList; @@ -25,84 +21,82 @@ import java.util.Map; /** - * A buildable job represents a job stored - * in the job spec XML file. + * A buildable job represents a job stored in the job spec XML file. * * @category Job Launching */ public class BuildableJob { - /** - * Struct for the job detail, used for adding job to DB. - */ - public JobDetail detail; - - /** - * Maximum CPU cores and GPU units overrides. - */ - public Integer maxCoresOverride = null; - public Integer maxGpusOverride = null; - - /** - * List of layers - */ - private List layers = new ArrayList(); - - private BuildableJob postJob = null; - - /** - * Stores the local core assignment if one was launched with the job. 
- */ - private LocalHostAssignment runLocalConf = null; - - /** - * Job specific environment variables - */ - public Map env = new HashMap(); - - public BuildableJob() { } - - public BuildableJob(JobDetail detail) { - this.detail = detail; - } - - /** - * Add a layer to the job - * - * @param layer - */ - public void addBuildableLayer(BuildableLayer layer) { - layers.add(layer); - } - - /** - * Add a key/value pair environment var to job - * - * @param key - * @param value - */ - public void addEnvironmentVariable(String key, String value) { - env.put(key, value); - } - - public List getBuildableLayers() { - return layers; - } - - public void setPostJob(BuildableJob job) { - this.postJob = job; - } - - public BuildableJob getPostJob() { - return this.postJob; - } - - public void setRunLocalConf(LocalHostAssignment runLocalConf) { - this.runLocalConf = runLocalConf; - } - - public LocalHostAssignment getRunLocalConf() { - return this.runLocalConf; - } + /** + * Struct for the job detail, used for adding job to DB. + */ + public JobDetail detail; + + /** + * Maximum CPU cores and GPU units overrides. + */ + public Integer maxCoresOverride = null; + public Integer maxGpusOverride = null; + + /** + * List of layers + */ + private List layers = new ArrayList(); + + private BuildableJob postJob = null; + + /** + * Stores the local core assignment if one was launched with the job. + */ + private LocalHostAssignment runLocalConf = null; + + /** + * Job specific environment variables + */ + public Map env = new HashMap(); + + public BuildableJob() {} + + public BuildableJob(JobDetail detail) { + this.detail = detail; + } + + /** + * Add a layer to the job + * + * @param layer + */ + public void addBuildableLayer(BuildableLayer layer) { + layers.add(layer); + } + + /** + * Add a key/value pair environment var to job + * + * @param key + * @param value + */ + public void addEnvironmentVariable(String key, String value) { + env.put(key, value); + } + + public List getBuildableLayers() { + return layers; + } + + public void setPostJob(BuildableJob job) { + this.postJob = job; + } + + public BuildableJob getPostJob() { + return this.postJob; + } + + public void setRunLocalConf(LocalHostAssignment runLocalConf) { + this.runLocalConf = runLocalConf; + } + + public LocalHostAssignment getRunLocalConf() { + return this.runLocalConf; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java index d23f28d06..0fbb0a2a4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java @@ -2,54 +2,46 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.util.HashMap; import java.util.Map; /** - * A buildable layer represents a layer stored - * in the job spec XML file. + * A buildable layer represents a layer stored in the job spec XML file. * * @category Job Launching */ public class BuildableLayer { - /** - * If the user manually set memory, this is - * updated to true. - */ - public boolean isMemoryOverride = false; - - /** - * Stores the layer detail. LayerDetail is needed to - * actually insert the layer into the DB. - */ - public LayerDetail layerDetail = new LayerDetail(); - - /** - * Map for storing environment vars - */ - public Map env = new HashMap(); - - public BuildableLayer() { } - - public BuildableLayer(LayerDetail detail) { - this.layerDetail = detail; - } + /** + * If the user manually set memory, this is updated to true. + */ + public boolean isMemoryOverride = false; + + /** + * Stores the layer detail. LayerDetail is needed to actually insert the layer into the DB. + */ + public LayerDetail layerDetail = new LayerDetail(); + + /** + * Map for storing environment vars + */ + public Map env = new HashMap(); + + public BuildableLayer() {} + + public BuildableLayer(LayerDetail detail) { + this.layerDetail = detail; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java b/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java index 213c28bc3..b8ac3e3ec 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java @@ -2,29 +2,24 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import java.sql.Timestamp; public class CommentDetail extends Entity { - public Timestamp timestamp; - public String subject; - public String message; - public String user; + public Timestamp timestamp; + public String subject; + public String message; + public String user; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java b/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java index 237b69dae..a9ddfe41d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java @@ -1,36 +1,31 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue; - import org.springframework.core.NestedRuntimeException; - public class CueGrpcException extends NestedRuntimeException { - private static final long serialVersionUID = -3215497096936812369L; + private static final long serialVersionUID = -3215497096936812369L; + + public CueGrpcException(String message) { + super(message); + } - public CueGrpcException(String message) { - super(message); - } + public CueGrpcException(String message, Throwable cause) { + super(message, cause); + } - public CueGrpcException(String message, Throwable cause) { - super(message, cause); - } - } diff --git a/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java b/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java index 6ef64080c..c50c308f2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue; import java.util.Arrays; @@ -30,35 +27,34 @@ @SpringBootApplication public class CuebotApplication extends SpringApplication { - private static String[] checkArgs(String[] args) { - Optional deprecatedFlag = Arrays.stream(args) - .filter(arg -> arg.startsWith("--log.frame-log-root=")).findFirst(); - if (deprecatedFlag.isPresent()) { - // Log a deprecation warning. - Logger warning_logger = LogManager.getLogger(CuebotApplication.class); - warning_logger.warn("`--log.frame-log-root` is deprecated and will be removed in an " + - "upcoming release. It has been replaced with `--log.frame-log-root.default_os`. " + - "See opencue.properties for details on OpenCue's new OS-dependent root directories."); - // If new flags are not present, swap in the value provided using the new flag. - // If the new flags are already present, don't do anything. - Optional newFlags = Arrays.stream(args) - .filter(arg -> arg.startsWith("--log.frame-log-root.")).findAny(); - if (!newFlags.isPresent()) { - String fixedFlag = "--log.frame-log-root.default_os=" - + StringUtils.substringAfter(deprecatedFlag.get(), "="); - args = Stream.concat( - Arrays.stream(args).filter(arg -> !arg.startsWith("--log.frame-log-root=")), - Stream.of(fixedFlag)) - .toArray(String[]::new); - } - } - return args; - } - - public static void main(String[] args) { - // Cuebot startup - String[] filteredArgs = checkArgs(args); - SpringApplication.run(CuebotApplication.class, filteredArgs); + private static String[] checkArgs(String[] args) { + Optional deprecatedFlag = + Arrays.stream(args).filter(arg -> arg.startsWith("--log.frame-log-root=")).findFirst(); + if (deprecatedFlag.isPresent()) { + // Log a deprecation warning. + Logger warning_logger = LogManager.getLogger(CuebotApplication.class); + warning_logger.warn("`--log.frame-log-root` is deprecated and will be removed in an " + + "upcoming release. It has been replaced with `--log.frame-log-root.default_os`. " + + "See opencue.properties for details on OpenCue's new OS-dependent root directories."); + // If new flags are not present, swap in the value provided using the new flag. + // If the new flags are already present, don't do anything. 
+ Optional newFlags = + Arrays.stream(args).filter(arg -> arg.startsWith("--log.frame-log-root.")).findAny(); + if (!newFlags.isPresent()) { + String fixedFlag = "--log.frame-log-root.default_os=" + + StringUtils.substringAfter(deprecatedFlag.get(), "="); + args = Stream + .concat(Arrays.stream(args).filter(arg -> !arg.startsWith("--log.frame-log-root=")), + Stream.of(fixedFlag)) + .toArray(String[]::new); + } } + return args; + } + + public static void main(String[] args) { + // Cuebot startup + String[] filteredArgs = checkArgs(args); + SpringApplication.run(CuebotApplication.class, filteredArgs); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java b/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java index fe518ef20..a0d79622a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java @@ -2,31 +2,26 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class DeedEntity extends Entity { - public String owner; - public String host; - public String show; + public String owner; + public String host; + public String show; - public String getName() { - return String.format("%s.%s", owner, host); - } + public String getName() { + return String.format("%s.%s", owner, host); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java b/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java index fbe8bfad4..ed077e3bd 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java @@ -2,28 +2,23 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class DepartmentEntity extends Entity implements DepartmentInterface { - public String getDepartmentId() { - return id; - } + public String getDepartmentId() { + return id; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java b/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java index a1fa79ab4..03e5ffab7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java @@ -2,25 +2,20 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface DepartmentInterface extends EntityInterface { - public String getDepartmentId(); + public String getDepartmentId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DependInterface.java b/cuebot/src/main/java/com/imageworks/spcue/DependInterface.java index a6895d137..e631bc200 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DependInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DependInterface.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface DependInterface extends EntityInterface { } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java b/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java index 59bc959f8..2f750c14e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import org.springframework.core.NestedRuntimeException; @@ -24,15 +20,14 @@ @SuppressWarnings("serial") public class DependencyManagerException extends NestedRuntimeException { - public DependencyManagerException(String arg0) { - super(arg0); - // TODO Auto-generated constructor stub - } + public DependencyManagerException(String arg0) { + super(arg0); + // TODO Auto-generated constructor stub + } - public DependencyManagerException(String arg0, Throwable arg1) { - super(arg0, arg1); - // TODO Auto-generated constructor stub - } + public DependencyManagerException(String arg0, Throwable arg1) { + super(arg0, arg1); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java index 1bd3806a9..9e5c856b6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.util.Optional; @@ -26,51 +22,50 @@ public class DispatchFrame extends FrameEntity implements FrameInterface { - public int retries; - public FrameState state; + public int retries; + public FrameState state; - public String show; - public String shot; - public String owner; - public Optional uid; - public String logDir; - public String command; - public String range; - public int chunkSize; + public String show; + public String shot; + public String owner; + public Optional uid; + public String logDir; + public String command; + public String range; + public int chunkSize; - public String layerName; - public String jobName; + public String layerName; + public String jobName; - public int minCores; - public int maxCores; - public boolean threadable; - public int minGpus; - public int maxGpus; - public long minGpuMemory; + public int minCores; + public int maxCores; + public boolean threadable; + public int minGpus; + public int maxGpus; + public long minGpuMemory; - // A comma separated list of services - public String services; + // A comma separated list of services + public String services; - // The Operational System this frame is expected to run in - public String os; + // The Operational System this frame is expected to run in + public String os; - // Memory requirement for this frame in bytes - private long minMemory; + // Memory requirement for this frame in bytes + private long minMemory; - // Soft limit to be enforced for this frame in bytes - public long softMemoryLimit; + // Soft limit to be enforced for this frame in bytes + public long softMemoryLimit; - // Hard limit to be enforced for this frame in bytes - public long hardMemoryLimit; + // Hard limit to be enforced for this frame in bytes + public long hardMemoryLimit; - public void setMinMemory(long minMemory) { - this.minMemory = minMemory; - this.softMemoryLimit = (long)(((double)minMemory) * Dispatcher.SOFT_MEMORY_MULTIPLIER); - this.hardMemoryLimit = (long)(((double)minMemory) * Dispatcher.HARD_MEMORY_MULTIPLIER); - } + public void setMinMemory(long minMemory) { + this.minMemory = minMemory; + this.softMemoryLimit = (long) (((double) minMemory) * Dispatcher.SOFT_MEMORY_MULTIPLIER); + this.hardMemoryLimit = (long) (((double) minMemory) * Dispatcher.HARD_MEMORY_MULTIPLIER); + } - public long getMinMemory() { - return this.minMemory; - } + public long getMinMemory() { + return this.minMemory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java index 40a3e6bbc..37060a244 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java @@ -2,21 
+2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import com.imageworks.spcue.dispatcher.ResourceContainer; @@ -30,167 +26,164 @@ public class DispatchHost extends Entity implements HostInterface, FacilityInterface, ResourceContainer { - private static final Logger logger = LogManager.getLogger(DispatchHost.class); - - public String facilityId; - public String allocationId; - public LockState lockState; - public HardwareState hardwareState; - - public int cores; - public int idleCores; - - public int gpus; - public int idleGpus; - - // Basically an 0 = auto, 1 = all. - public int threadMode; - - public long memory; - public long idleMemory; - public long gpuMemory; - public long idleGpuMemory; - public String tags; - private String os; - - public boolean isNimby; - public boolean isLocalDispatch = false; - - /** - * Number of cores that will be added to the first proc - * booked to this host. - */ - public int strandedCores = 0; - public int strandedGpus = 0; - - // To reserve resources for future gpu job - long idleMemoryOrig = 0; - int idleCoresOrig = 0; - long idleGpuMemoryOrig = 0; - int idleGpusOrig = 0; - - public String getHostId() { - return id; - } - - public String getAllocationId() { - return allocationId; - } - - public String getFacilityId() { - return facilityId; + private static final Logger logger = LogManager.getLogger(DispatchHost.class); + + public String facilityId; + public String allocationId; + public LockState lockState; + public HardwareState hardwareState; + + public int cores; + public int idleCores; + + public int gpus; + public int idleGpus; + + // Basically an 0 = auto, 1 = all. + public int threadMode; + + public long memory; + public long idleMemory; + public long gpuMemory; + public long idleGpuMemory; + public String tags; + private String os; + + public boolean isNimby; + public boolean isLocalDispatch = false; + + /** + * Number of cores that will be added to the first proc booked to this host. 
+ */ + public int strandedCores = 0; + public int strandedGpus = 0; + + // To reserve resources for future gpu job + long idleMemoryOrig = 0; + int idleCoresOrig = 0; + long idleGpuMemoryOrig = 0; + int idleGpusOrig = 0; + + public String getHostId() { + return id; + } + + public String getAllocationId() { + return allocationId; + } + + public String getFacilityId() { + return facilityId; + } + + public String[] getOs() { + return this.os.split(","); + } + + public void setOs(String os) { + this.os = os; + } + + public boolean canHandleNegativeCoresRequest(int requestedCores) { + // Request is positive, no need to test further. + if (requestedCores > 0) { + logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); + return true; } - - public String[] getOs() { - return this.os.split(","); - } - - public void setOs(String os) { - this.os = os; + // All cores are available, validate the request. + if (cores == idleCores) { + logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); + return true; } - - public boolean canHandleNegativeCoresRequest(int requestedCores) { - // Request is positive, no need to test further. - if (requestedCores > 0) { - logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); - return true; - } - // All cores are available, validate the request. - if (cores == idleCores) { - logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); - return true; - } - // Some or all cores are busy, avoid booking again. - logger.debug(getName() + " cannot handle the job with " + requestedCores + " cores."); - return false; + // Some or all cores are busy, avoid booking again. + logger.debug(getName() + " cannot handle the job with " + requestedCores + " cores."); + return false; + } + + public int handleNegativeCoresRequirement(int requestedCores) { + // If we request a <=0 amount of cores, return positive core count. + // Request -2 on a 24 core machine will return 22. + + if (requestedCores > 0) { + // Do not process positive core requests. + logger.debug("Requested " + requestedCores + " cores."); + return requestedCores; } - - public int handleNegativeCoresRequirement(int requestedCores) { - // If we request a <=0 amount of cores, return positive core count. - // Request -2 on a 24 core machine will return 22. - - if (requestedCores > 0) { - // Do not process positive core requests. - logger.debug("Requested " + requestedCores + " cores."); - return requestedCores; - } - if (requestedCores <=0 && idleCores < cores) { - // If request is negative but cores are already used, return 0. - // We don't want to overbook the host. - logger.debug("Requested " + requestedCores + " cores, but the host is busy and cannot book more jobs."); - return 0; - } - // Book all cores minus the request - int totalCores = idleCores + requestedCores; - logger.debug("Requested " + requestedCores + " cores <= 0, " + - idleCores + " cores are free, booking " + totalCores + " cores"); - return totalCores; + if (requestedCores <= 0 && idleCores < cores) { + // If request is negative but cores are already used, return 0. + // We don't want to overbook the host. 
+ logger.debug("Requested " + requestedCores + + " cores, but the host is busy and cannot book more jobs."); + return 0; } - - @Override - public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, long minGpuMemory) { - minCores = handleNegativeCoresRequirement(minCores); - if (idleCores < minCores) { - return false; - } - if (minCores <= 0) { - return false; - } - else if (idleMemory < minMemory) { - return false; - } - else if (idleGpus < minGpus) { - return false; - } - else if (idleGpuMemory < minGpuMemory) { - return false; - } - - return true; + // Book all cores minus the request + int totalCores = idleCores + requestedCores; + logger.debug("Requested " + requestedCores + " cores <= 0, " + idleCores + + " cores are free, booking " + totalCores + " cores"); + return totalCores; + } + + @Override + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, + long minGpuMemory) { + minCores = handleNegativeCoresRequirement(minCores); + if (idleCores < minCores) { + return false; } - - @Override - public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { - idleCores = idleCores - coreUnits; - idleMemory = idleMemory - memory; - idleGpus = idleGpus - gpuUnits; - idleGpuMemory = idleGpuMemory - gpuMemory; + if (minCores <= 0) { + return false; + } else if (idleMemory < minMemory) { + return false; + } else if (idleGpus < minGpus) { + return false; + } else if (idleGpuMemory < minGpuMemory) { + return false; } - /** - * If host has idle gpu, remove enough resources to book a gpu frame later. - * - */ - public void removeGpu() { - if (idleGpuMemory > 0 && idleGpuMemoryOrig == 0) { - idleMemoryOrig = idleMemory; - idleCoresOrig = idleCores; - idleGpuMemoryOrig = idleGpuMemory; - idleGpusOrig = idleGpus; - - idleMemory = idleMemory - Math.min(CueUtil.GB4, idleMemory); - idleCores = idleCores - Math.min(100, idleCores); - idleGpuMemory = idleGpuMemory - Math.min(CueUtil.GB4, idleGpuMemory); - idleGpus = idleGpus - Math.min(1, idleGpus); - } + return true; + } + + @Override + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { + idleCores = idleCores - coreUnits; + idleMemory = idleMemory - memory; + idleGpus = idleGpus - gpuUnits; + idleGpuMemory = idleGpuMemory - gpuMemory; + } + + /** + * If host has idle gpu, remove enough resources to book a gpu frame later. + * + */ + public void removeGpu() { + if (idleGpuMemory > 0 && idleGpuMemoryOrig == 0) { + idleMemoryOrig = idleMemory; + idleCoresOrig = idleCores; + idleGpuMemoryOrig = idleGpuMemory; + idleGpusOrig = idleGpus; + + idleMemory = idleMemory - Math.min(CueUtil.GB4, idleMemory); + idleCores = idleCores - Math.min(100, idleCores); + idleGpuMemory = idleGpuMemory - Math.min(CueUtil.GB4, idleGpuMemory); + idleGpus = idleGpus - Math.min(1, idleGpus); } - - /** - * If host had idle gpu removed, restore the host to the origional state. - * - */ - public void restoreGpu() { - if (idleGpuMemoryOrig > 0) { - idleMemory = idleMemoryOrig; - idleCores = idleCoresOrig; - idleGpuMemory = idleGpuMemoryOrig; - idleGpus = idleGpusOrig; - - idleMemoryOrig = 0; - idleCoresOrig = 0; - idleGpuMemoryOrig = 0; - idleGpusOrig = 0; - } + } + + /** + * If host had idle gpu removed, restore the host to the origional state. 
+ * + */ + public void restoreGpu() { + if (idleGpuMemoryOrig > 0) { + idleMemory = idleMemoryOrig; + idleCores = idleCoresOrig; + idleGpuMemory = idleGpuMemoryOrig; + idleGpus = idleGpusOrig; + + idleMemoryOrig = 0; + idleCoresOrig = 0; + idleGpuMemoryOrig = 0; + idleGpusOrig = 0; } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java index 5684cbfab..347565b10 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java @@ -2,31 +2,26 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.job.JobState; public class DispatchJob extends JobEntity implements JobInterface { - public int maxRetries; - public boolean paused; - public boolean autoEat; - public boolean autoBook; - public boolean autoUnbook; - public JobState state; + public int maxRetries; + public boolean paused; + public boolean autoEat; + public boolean autoBook; + public boolean autoUnbook; + public JobState state; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/Entity.java b/cuebot/src/main/java/com/imageworks/spcue/Entity.java index d45b37ab2..b216893f6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Entity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Entity.java @@ -2,71 +2,66 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class Entity implements EntityInterface { - public String id = null; - public String name = "unknown"; + public String id = null; + public String name = "unknown"; - public Entity() { } + public Entity() {} - public Entity(String id) { - this.id = id; - } + public Entity(String id) { + this.id = id; + } - public Entity(String id, String name) { - this.id = id; - this.name = name; - } + public Entity(String id, String name) { + this.id = id; + this.name = name; + } - public String getId() { - return id; - } - public String getName() { - return name; - } + public String getId() { + return id; + } - public boolean isNew() { - return id == null; - } + public String getName() { + return name; + } - @Override - public String toString() { - return String.format("%s/%s", getName(), getId()); - } + public boolean isNew() { + return id == null; + } - @Override - public int hashCode() { - if (id != null) { - return id.hashCode(); - } - else { - return super.hashCode(); - } + @Override + public String toString() { + return String.format("%s/%s", getName(), getId()); + } + + @Override + public int hashCode() { + if (id != null) { + return id.hashCode(); + } else { + return super.hashCode(); } + } - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - return this.toString().equals(other.toString()); + @Override + public boolean equals(Object other) { + if (other == null) { + return false; } + return this.toString().equals(other.toString()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java b/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java index bbe51cf18..6c1a243f8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java @@ -2,49 +2,44 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class EntityCreationError extends EntityException { - public EntityCreationError() { - // TODO Auto-generated constructor stub - } + public EntityCreationError() { + // TODO Auto-generated constructor stub + } - public EntityCreationError(String message, EntityInterface entity) { - super("failed to create entity of type: " + entity.getClass() - + " with name: " + entity.getName() + " ," + message, entity); - } + public EntityCreationError(String message, EntityInterface entity) { + super("failed to create entity of type: " + entity.getClass() + " with name: " + + entity.getName() + " ," + message, entity); + } - public EntityCreationError(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public EntityCreationError(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public EntityCreationError(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public EntityCreationError(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public EntityCreationError(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityCreationError(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityException.java b/cuebot/src/main/java/com/imageworks/spcue/EntityException.java index 802d6c219..463ec588c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityException.java @@ -2,52 +2,47 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class EntityException extends SpcueRuntimeException { - @SuppressWarnings("unused") - private EntityInterface entity; + @SuppressWarnings("unused") + private EntityInterface entity; - public EntityException() { - // TODO Auto-generated constructor stub - } + public EntityException() { + // TODO Auto-generated constructor stub + } - public EntityException(String message, EntityInterface e) { - super(message); - entity = e; - } + public EntityException(String message, EntityInterface e) { + super(message); + entity = e; + } - public EntityException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public EntityException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public EntityException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public EntityException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public EntityException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java b/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java index 47660eb8c..a9fbd0765 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java @@ -2,26 +2,22 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface EntityInterface { - public String getName(); - public String getId(); -} + public String getName(); + public String getId(); +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java b/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java index 87bbb8ff5..fb347f3d6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java @@ -2,49 +2,44 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class EntityModificationError extends EntityException { - public EntityModificationError() { - // TODO Auto-generated constructor stub - } + public EntityModificationError() { + // TODO Auto-generated constructor stub + } - public EntityModificationError(String message, EntityInterface e) { - super(message, e); - // TODO Auto-generated constructor stub - } + public EntityModificationError(String message, EntityInterface e) { + super(message, e); + // TODO Auto-generated constructor stub + } - public EntityModificationError(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public EntityModificationError(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public EntityModificationError(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public EntityModificationError(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public EntityModificationError(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityModificationError(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java b/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java index 8ec27a5ae..f463904c4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java @@ -1,36 +1,31 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue; - import org.springframework.core.NestedRuntimeException; - public class EntityNotFoundException extends NestedRuntimeException { - private static final long serialVersionUID = -5612998213656259822L; + private static final long serialVersionUID = -5612998213656259822L; - public EntityNotFoundException(String message) { - super(message); - } + public EntityNotFoundException(String message) { + super(message); + } - public EntityNotFoundException(String message, Throwable cause) { - super(message, cause); - } + public EntityNotFoundException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java b/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java index a7dfc48d3..31379fbaf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java @@ -2,49 +2,44 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class EntityRemovalError extends EntityException { - public EntityRemovalError() { - // TODO Auto-generated constructor stub - } + public EntityRemovalError() { + // TODO Auto-generated constructor stub + } - public EntityRemovalError(String message, EntityInterface entity) { - super("failed to create entity of type: " + entity.getClass() - + " with name: " + entity.getName() + " ," + message, entity); - } + public EntityRemovalError(String message, EntityInterface entity) { + super("failed to create entity of type: " + entity.getClass() + " with name: " + + entity.getName() + " ," + message, entity); + } - public EntityRemovalError(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public EntityRemovalError(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public EntityRemovalError(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public EntityRemovalError(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public EntityRemovalError(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityRemovalError(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java b/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java index cd5eb5633..9ae399952 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java @@ -2,44 +2,39 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class EntityRetrievalException extends RuntimeException { - public EntityRetrievalException() { - // TODO Auto-generated constructor stub - } + public EntityRetrievalException() { + // TODO Auto-generated constructor stub + } - public EntityRetrievalException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public EntityRetrievalException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public EntityRetrievalException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityRetrievalException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } - public EntityRetrievalException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public EntityRetrievalException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java b/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java index afe85121a..87a499060 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; /** @@ -24,77 +20,76 @@ */ public class ExecutionSummary { - public double standardDeviation; - public long coreTime; - public long coreTimeSuccess; - public long coreTimeFail; - public long gpuTime; - public long gpuTimeSuccess; - public long gpuTimeFail; - public long highMemoryKb; - - public long getHighMemoryKb() { - return highMemoryKb; - } - - public void setHighMemoryKb(long highMemoryKb) { - this.highMemoryKb = highMemoryKb; - } - - public double getStandardDeviation() { - return standardDeviation; - } - - public void setStandardDeviation(double standardDeviation) { - this.standardDeviation = standardDeviation; - } - - public long getCoreTime() { - return coreTime; - } - - public void setCoreTime(long coreTime) { - this.coreTime = coreTime; - } - - public long getCoreTimeSuccess() { - return coreTimeSuccess; - } - - public void setCoreTimeSuccess(long coreTimeSuccess) { - this.coreTimeSuccess = coreTimeSuccess; - } - - public long getCoreTimeFail() { - return coreTimeFail; - } - - public void setCoreTimeFail(long coreTimeFail) { - this.coreTimeFail = coreTimeFail; - } - - public long getGpuTime() { - return gpuTime; - } - - public void setGpuTime(long gpuTime) { - this.gpuTime = gpuTime; - } - - public long getGpuTimeSuccess() { - return gpuTimeSuccess; - } - - public void setGpuTimeSuccess(long gpuTimeSuccess) { - this.gpuTimeSuccess = gpuTimeSuccess; - } - - public long getGpuTimeFail() { - return gpuTimeFail; - } - - public void setGpuTimeFail(long gpuTimeFail) { - this.gpuTimeFail = gpuTimeFail; - } + public double standardDeviation; + public long coreTime; + public long coreTimeSuccess; + public long coreTimeFail; + public long gpuTime; + public long gpuTimeSuccess; + public long gpuTimeFail; + public long highMemoryKb; + + public long getHighMemoryKb() { + return highMemoryKb; + } + + public void setHighMemoryKb(long highMemoryKb) { + this.highMemoryKb = highMemoryKb; + } + + public double getStandardDeviation() { + return standardDeviation; + } + + public void setStandardDeviation(double standardDeviation) { + this.standardDeviation = standardDeviation; + } + + public long getCoreTime() { + return coreTime; + } + + public void setCoreTime(long coreTime) { + this.coreTime = coreTime; + } + + public long getCoreTimeSuccess() { + return coreTimeSuccess; + } + + public void setCoreTimeSuccess(long coreTimeSuccess) { + this.coreTimeSuccess = coreTimeSuccess; + } + + public long getCoreTimeFail() { + return coreTimeFail; + } + + public void setCoreTimeFail(long coreTimeFail) { + this.coreTimeFail = coreTimeFail; + } + + public long getGpuTime() { + return gpuTime; + } + + public void setGpuTime(long gpuTime) { + this.gpuTime = gpuTime; + } + + public long getGpuTimeSuccess() { + return gpuTimeSuccess; + } + + public void setGpuTimeSuccess(long gpuTimeSuccess) { + this.gpuTimeSuccess = gpuTimeSuccess; + } + + public long getGpuTimeFail() { + return gpuTimeFail; + } + + public void setGpuTimeFail(long gpuTimeFail) { + this.gpuTimeFail = gpuTimeFail; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java b/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java index b9d26d1f8..14d1db864 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java @@ -2,33 +2,28 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance 
with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class FacilityEntity extends Entity implements FacilityInterface { - public FacilityEntity() {} + public FacilityEntity() {} - public FacilityEntity(String id) { - this.id = id; - } + public FacilityEntity(String id) { + this.id = id; + } - public String getFacilityId() { - return id; - } + public String getFacilityId() { + return id; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java index 0a83cd58b..26cd1abe0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface FacilityInterface extends EntityInterface { - public String getFacilityId(); + public String getFacilityId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java b/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java index 22bea4b46..3c30f8b9c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java @@ -2,47 +2,42 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.filter.FilterType; public class FilterEntity extends Entity implements FilterInterface { - public FilterType type; - public String showId; - public boolean enabled; - public float order; + public FilterType type; + public String showId; + public boolean enabled; + public float order; - public String getId() { - return id; - } + public String getId() { + return id; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public String getFilterId() { - return id; - } + public String getFilterId() { + return id; + } - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java index 1c6441fec..0805749ab 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public interface FilterInterface extends ShowInterface { - public String getFilterId(); + public String getFilterId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java b/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java index 891523805..cb95b341f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.sql.Timestamp; @@ -25,18 +21,17 @@ public class FrameDetail extends FrameEntity implements FrameInterface { - public FrameState state; - public int number; - public int dependCount; - public int retryCount; - public int exitStatus; - public long maxRss; - public int dispatchOrder; - public String lastResource; - - public Timestamp dateStarted; - public Timestamp dateStopped; - public Timestamp dateUpdated; - public Timestamp dateLLU; + public FrameState state; + public int number; + public int dependCount; + public int retryCount; + public int exitStatus; + public long maxRss; + public int dispatchOrder; + public String lastResource; + + public Timestamp dateStarted; + public Timestamp dateStopped; + public Timestamp dateUpdated; + public Timestamp dateLLU; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java b/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java index 8ba4bee88..eb9a3d080 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java @@ -2,44 +2,39 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class FrameEntity extends LayerEntity implements FrameInterface { - public String layerId; - public int version; + public String layerId; + public int version; - public FrameEntity() {} + public FrameEntity() {} - public FrameEntity(String id) { - this.id = id; - } + public FrameEntity(String id) { + this.id = id; + } - public String getFrameId() { - return id; - } + public String getFrameId() { + return id; + } - public String getLayerId() { - return layerId; - } + public String getLayerId() { + return layerId; + } - public int getVersion() { - return version; - } + public int getVersion() { + return version; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java index 945685444..d60325a7c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java @@ -1,35 +1,28 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface FrameInterface extends LayerInterface { - public String getFrameId(); + public String getFrameId(); - /** - * To change a frame state, you must have the same version of the frame that - * exists in the DB. If the version changes, any operation that changes the - * state will fail. - * - * @return the time stamp that represents the last time this frame was - * updated. - */ - public int getVersion(); + /** + * To change a frame state, you must have the same version of the frame that exists in the DB. If + * the version changes, any operation that changes the state will fail. + * + * @return the time stamp that represents the last time this frame was updated. 
+ */ + public int getVersion(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java b/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java index cf8c96d58..fa9e33c17 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java @@ -2,95 +2,90 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class FrameStateTotals { - public int waiting = 0; - public int running = 0; - public int depend = 0; - public int dead = 0; - public int eaten = 0; - public int succeeded = 0; - public int total = 0; - public int checkpoint = 0; - - public int getWaiting() { - return waiting; - } - - public void setWaiting(int waiting) { - this.waiting = waiting; - } - - public int getRunning() { - return running; - } - - public void setRunning(int running) { - this.running = running; - } - - public int getDepend() { - return depend; - } - - public void setDepend(int depend) { - this.depend = depend; - } - - public int getDead() { - return dead; - } - - public void setDead(int dead) { - this.dead = dead; - } - - public int getEaten() { - return eaten; - } - - public void setEaten(int eaten) { - this.eaten = eaten; - } - - public int getSucceeded() { - return succeeded; - } - - public void setSucceeded(int succeeded) { - this.succeeded = succeeded; - } - - public int getTotal() { - return total; - } - - public void setTotal(int total) { - this.total = total; - } - - public int getCheckpoint() { - return checkpoint; - } - - public void setCheckpoint(int checkpoint) { - this.checkpoint = checkpoint; - } + public int waiting = 0; + public int running = 0; + public int depend = 0; + public int dead = 0; + public int eaten = 0; + public int succeeded = 0; + public int total = 0; + public int checkpoint = 0; + + public int getWaiting() { + return waiting; + } + + public void setWaiting(int waiting) { + this.waiting = waiting; + } + + public int getRunning() { + return running; + } + + public void setRunning(int running) { + this.running = running; + } + + public int getDepend() { + return depend; + } + + public void setDepend(int depend) { + this.depend = depend; + } + + public int getDead() { + return dead; + } + + public void setDead(int dead) { + this.dead = dead; + } + + public int getEaten() { + return eaten; + } + + public void setEaten(int eaten) { + this.eaten = 
eaten; + } + + public int getSucceeded() { + return succeeded; + } + + public void setSucceeded(int succeeded) { + this.succeeded = succeeded; + } + + public int getTotal() { + return total; + } + + public void setTotal(int total) { + this.total = total; + } + + public int getCheckpoint() { + return checkpoint; + } + + public void setCheckpoint(int checkpoint) { + this.checkpoint = checkpoint; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java b/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java index cd9f8a998..1d4c468d2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java @@ -2,53 +2,48 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public class GroupDetail extends Entity implements GroupInterface, DepartmentInterface { - public int jobMinCores = -1; - public int jobMaxCores = -1; - public int jobMinGpus = -1; - public int jobMaxGpus = -1; - public int jobPriority = -1; + public int jobMinCores = -1; + public int jobMaxCores = -1; + public int jobMinGpus = -1; + public int jobMaxGpus = -1; + public int jobPriority = -1; - public int minCores = -1; - public int maxCores = -1; + public int minCores = -1; + public int maxCores = -1; - public int minGpus = -1; - public int maxGpus = -1; + public int minGpus = -1; + public int maxGpus = -1; - public String parentId = null; - public String showId; - public String deptId; + public String parentId = null; + public String showId; + public String deptId; - @Override - public String getShowId() { - return showId; - } + @Override + public String getShowId() { + return showId; + } - public String getGroupId() { - return id; - } + public String getGroupId() { + return id; + } - @Override - public String getDepartmentId() { - return deptId; - } + @Override + public String getDepartmentId() { + return deptId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java b/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java index ffcb2ac02..49d81e967 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java @@ -2,38 +2,31 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public class GroupEntity extends Entity implements GroupInterface { - public String showId; - - @Override - public String getGroupId() { - return id; - } - - @Override - public String getShowId() { - return showId; - } + public String showId; + @Override + public String getGroupId() { + return id; + } + @Override + public String getShowId() { + return showId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java b/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java index f5ae74549..a00118941 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface GroupInterface extends ShowInterface { - public String getGroupId(); + public String getGroupId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java b/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java index bbe557a64..eafe9701f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java @@ -2,44 +2,39 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class HistoricalJobTransferException extends SpcueRuntimeException { - public HistoricalJobTransferException() { - // TODO Auto-generated constructor stub - } + public HistoricalJobTransferException() { + // TODO Auto-generated constructor stub + } - public HistoricalJobTransferException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public HistoricalJobTransferException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public HistoricalJobTransferException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public HistoricalJobTransferException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public HistoricalJobTransferException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public HistoricalJobTransferException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java b/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java index 8147659b0..a7eb2b393 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java @@ -2,45 +2,40 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class HostConfigurationErrorException extends SpcueRuntimeException { - public HostConfigurationErrorException() { - super(); - // TODO Auto-generated constructor stub - } + public HostConfigurationErrorException() { + super(); + // TODO Auto-generated constructor stub + } - public HostConfigurationErrorException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public HostConfigurationErrorException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public HostConfigurationErrorException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public HostConfigurationErrorException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public HostConfigurationErrorException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public HostConfigurationErrorException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java b/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java index 5a019e8f1..172d5593e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import java.util.Date; @@ -27,56 +23,55 @@ public class HostEntity extends Entity implements HostInterface { - public String facilityId; - public String allocId; - public HardwareState state; - public LockState lockState; - public boolean nimbyEnabled; - - public int procs; - public int cores; - public int idleCores; - public long memory; - public long idleMemory; - public int gpus; - public int idleGpus; - public long gpuMemory; - public long idleGpuMemory; - - public boolean unlockAtBoot; - - public Date dateCreated; - public Date datePinged; - public Date dateBooted; - - public HostEntity() {} - - public HostEntity(Host grpcHost) { - this.id = grpcHost.getId(); - this.allocId = grpcHost.getAllocName(); - this.state = grpcHost.getState(); - this.lockState = grpcHost.getLockState(); - this.nimbyEnabled = grpcHost.getNimbyEnabled(); - this.cores = (int) grpcHost.getCores(); - this.idleCores = (int) grpcHost.getIdleCores(); - this.memory = grpcHost.getMemory(); - this.idleMemory = grpcHost.getIdleMemory(); - this.gpus = (int) grpcHost.getGpus(); - this.idleGpus = (int) grpcHost.getIdleGpus(); - this.gpuMemory = grpcHost.getGpuMemory(); - this.idleGpuMemory = grpcHost.getIdleGpuMemory(); - } - - public String getHostId() { - return id; - } - - public String getAllocationId() { - return allocId; - } - - public String getFacilityId() { - return facilityId; - } + public String facilityId; + public String allocId; + public HardwareState state; + public LockState lockState; + public boolean nimbyEnabled; + + public int procs; + public int cores; + public int idleCores; + public long memory; + public long idleMemory; + public int gpus; + public int idleGpus; + public long gpuMemory; + public long idleGpuMemory; + + public boolean unlockAtBoot; + + public Date dateCreated; + public Date datePinged; + public Date dateBooted; + + public HostEntity() {} + + public HostEntity(Host grpcHost) { + this.id = grpcHost.getId(); + this.allocId = grpcHost.getAllocName(); + this.state = grpcHost.getState(); + this.lockState = grpcHost.getLockState(); + this.nimbyEnabled = grpcHost.getNimbyEnabled(); + this.cores = (int) grpcHost.getCores(); + this.idleCores = (int) grpcHost.getIdleCores(); + this.memory = grpcHost.getMemory(); + this.idleMemory = grpcHost.getIdleMemory(); + this.gpus = (int) grpcHost.getGpus(); + this.idleGpus = (int) grpcHost.getIdleGpus(); + this.gpuMemory = grpcHost.getGpuMemory(); + this.idleGpuMemory = grpcHost.getIdleGpuMemory(); + } + + public String getHostId() { + return id; + } + + public String getAllocationId() { + return allocId; + } + + public String getFacilityId() { + return facilityId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java b/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java index 5e875652a..f3fff15e1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface HostInterface extends AllocationInterface { - String getHostId(); + String getHostId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/Inherit.java b/cuebot/src/main/java/com/imageworks/spcue/Inherit.java index 1fdb23336..b368e2380 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Inherit.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Inherit.java @@ -2,34 +2,23 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; /** - * An enumeration of possible things a job can - * inherit from a group. + * An enumeration of possible things a job can inherit from a group. * */ public enum Inherit { - Priority, - MinCores, - MaxCores, - MinGpus, - MaxGpus, - All + Priority, MinCores, MaxCores, MinGpus, MaxGpus, All } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java b/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java index dad6f8a6d..b5a7a3e78 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue; @@ -22,42 +20,41 @@ import com.imageworks.spcue.grpc.job.JobState; public class JobDetail extends JobEntity implements JobInterface, DepartmentInterface { - public String groupId; - public String deptId; - public JobState state; - public String shot; - public String user; - public String email; - public Optional uid; - public String logDir; - public boolean isPaused; - public boolean isAutoEat; - public int totalFrames; - public int totalLayers; - public int startTime; - public int stopTime; - public int maxRetries; - - public String os; - public String facilityName; - public String deptName; - public String showName; - - public int priority = 1; - public int minCoreUnits = 100; - public int maxCoreUnits = 200000; - public int minGpuUnits = 0; - public int maxGpuUnits = 1000; - public boolean isLocal = false; - public String localHostName; - public int localMaxCores; - public long localMaxMemory; - public int localThreadNumber; - public int localMaxGpus; - public long localMaxGpuMemory; - - public String getDepartmentId() { - return deptId; - } + public String groupId; + public String deptId; + public JobState state; + public String shot; + public String user; + public String email; + public Optional uid; + public String logDir; + public boolean isPaused; + public boolean isAutoEat; + public int totalFrames; + public int totalLayers; + public int startTime; + public int stopTime; + public int maxRetries; + + public String os; + public String facilityName; + public String deptName; + public String showName; + + public int priority = 1; + public int minCoreUnits = 100; + public int maxCoreUnits = 200000; + public int minGpuUnits = 0; + public int maxGpuUnits = 1000; + public boolean isLocal = false; + public String localHostName; + public int localMaxCores; + public long localMaxMemory; + public int localThreadNumber; + public int localMaxGpus; + public long localMaxGpuMemory; + + public String getDepartmentId() { + return deptId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java b/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java index 2dc84cf92..d12c4b896 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java @@ -2,42 +2,37 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class JobDispatchException extends SpcueRuntimeException { - public JobDispatchException() { - // TODO Auto-generated constructor stub - } + public JobDispatchException() { + // TODO Auto-generated constructor stub + } - public JobDispatchException(String message) { - super(message); - } + public JobDispatchException(String message) { + super(message); + } - public JobDispatchException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public JobDispatchException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public JobDispatchException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public JobDispatchException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java b/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java index eec806605..d9aec9e4b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java @@ -2,44 +2,39 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public class JobEntity extends Entity implements JobInterface { - public String showId; - public String facilityId; + public String showId; + public String facilityId; - public JobEntity() {} + public JobEntity() {} - public JobEntity(String id) { - this.id = id; - } + public JobEntity(String id) { + this.id = id; + } - public String getJobId() { - return id; - } + public String getJobId() { + return id; + } - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } - public String getFacilityId() { - return facilityId; - } + public String getFacilityId() { + return facilityId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java b/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java index e53cbccbe..631925492 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java @@ -2,25 +2,20 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface JobInterface extends ShowInterface, FacilityInterface { - public String getJobId(); + public String getJobId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java b/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java index 56fa14984..a10bb18c4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java @@ -2,44 +2,39 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class JobLaunchException extends SpcueRuntimeException { - public JobLaunchException() { - // TODO Auto-generated constructor stub - } + public JobLaunchException() { + // TODO Auto-generated constructor stub + } - public JobLaunchException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public JobLaunchException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public JobLaunchException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public JobLaunchException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public JobLaunchException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public JobLaunchException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java b/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java index 575547122..d173c63dc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import java.util.ArrayList; @@ -27,144 +23,143 @@ import com.imageworks.spcue.grpc.job.LayerType; public class LayerDetail extends LayerEntity implements LayerInterface { - public String command; - public String range; - public LayerType type; - public int minimumCores; - public int maximumCores; - public int minimumGpus; - public int maximumGpus; - public boolean isThreadable; - public long minimumMemory; - public long minimumGpuMemory; - public int chunkSize; - public int timeout; - public int timeout_llu; - public int dispatchOrder; - public int totalFrameCount; - - public Set tags = new LinkedHashSet(); - public Set services = new LinkedHashSet(); - public Set limits = new LinkedHashSet(); - public Set outputs = new LinkedHashSet(); - - /* - * - */ - public List getServices() { - return new ArrayList(services); - } - - public String getCommand() { - return command; - } - - public void setCommand(String command) { - this.command = command; - } - - public String getRange() { - return range; - } - - public void setRange(String range) { - this.range = range; - } - - public LayerType getType() { - return type; - } - - public void setType(LayerType type) { - this.type = type; - } - - public int getMinimumCores() { - return minimumCores; - } - - public void setMinimumCores(int minimumCores) { - this.minimumCores = minimumCores; - } - - public boolean isThreadable() { - return isThreadable; - } - - public void setThreadable(boolean isThreadable) { - this.isThreadable = isThreadable; - } - - public int getTimeout() { - return timeout; - } - - public void setTimeout(int timeout) { - this.timeout = timeout; - } - - public int getTimeoutLLU() { - return timeout; - } - - public void setTimeoutLLU(int timeout_llu) { - this.timeout_llu = timeout_llu; - } - - public long getMinimumMemory() { - return minimumMemory; - } - - public void setMinimumMemory(long minimumMemory) { - this.minimumMemory = minimumMemory; - } - - public int getMinimumGpus() { - return minimumGpus; - } - - public void setMinimumGpus(int minimumGpus) { - this.minimumGpus = minimumGpus; - } - - public long getMinimumGpuMemory() { - return minimumGpuMemory; - } - - public void setMinimumGpuMemory(long minimumGpuMemory) { - this.minimumGpuMemory = minimumGpuMemory; - } - - public int getChunkSize() { - return chunkSize; - } - - public void setChunkSize(int chunkSize) { - this.chunkSize = chunkSize; - } - - public int getDispatchOrder() { - return dispatchOrder; - } - - public void setDispatchOrder(int dispatchOrder) { - this.dispatchOrder = dispatchOrder; - } - - public int getTotalFrameCount() { - return totalFrameCount; - } + public String command; + public String range; + public LayerType type; + public int minimumCores; + public int maximumCores; + public int minimumGpus; + public int maximumGpus; + public boolean isThreadable; + public long minimumMemory; + public long minimumGpuMemory; + public int chunkSize; + public int timeout; + public int timeout_llu; + public int dispatchOrder; + public int totalFrameCount; + + public Set tags = new LinkedHashSet(); + public Set services = new LinkedHashSet(); + public Set limits = new LinkedHashSet(); + public Set outputs = new LinkedHashSet(); + + /* + * + */ + public List getServices() { + return new ArrayList(services); + } + + public String getCommand() { + return command; + } + + public void setCommand(String command) { + this.command = command; + } + + public String getRange() { + return range; + } + + public void setRange(String range) { + 
this.range = range; + } + + public LayerType getType() { + return type; + } + + public void setType(LayerType type) { + this.type = type; + } + + public int getMinimumCores() { + return minimumCores; + } + + public void setMinimumCores(int minimumCores) { + this.minimumCores = minimumCores; + } + + public boolean isThreadable() { + return isThreadable; + } + + public void setThreadable(boolean isThreadable) { + this.isThreadable = isThreadable; + } + + public int getTimeout() { + return timeout; + } + + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + public int getTimeoutLLU() { + return timeout; + } + + public void setTimeoutLLU(int timeout_llu) { + this.timeout_llu = timeout_llu; + } + + public long getMinimumMemory() { + return minimumMemory; + } + + public void setMinimumMemory(long minimumMemory) { + this.minimumMemory = minimumMemory; + } + + public int getMinimumGpus() { + return minimumGpus; + } + + public void setMinimumGpus(int minimumGpus) { + this.minimumGpus = minimumGpus; + } + + public long getMinimumGpuMemory() { + return minimumGpuMemory; + } + + public void setMinimumGpuMemory(long minimumGpuMemory) { + this.minimumGpuMemory = minimumGpuMemory; + } + + public int getChunkSize() { + return chunkSize; + } + + public void setChunkSize(int chunkSize) { + this.chunkSize = chunkSize; + } + + public int getDispatchOrder() { + return dispatchOrder; + } + + public void setDispatchOrder(int dispatchOrder) { + this.dispatchOrder = dispatchOrder; + } + + public int getTotalFrameCount() { + return totalFrameCount; + } - public void setTotalFrameCount(int totalFrameCount) { - this.totalFrameCount = totalFrameCount; - } + public void setTotalFrameCount(int totalFrameCount) { + this.totalFrameCount = totalFrameCount; + } - public Set getTags() { - return tags; - } + public Set getTags() { + return tags; + } - public void setTags(Set tags) { - this.tags = tags; - } + public void setTags(Set tags) { + this.tags = tags; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java b/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java index 92e11c8cb..34f26cb5b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java @@ -2,51 +2,46 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public class LayerEntity extends Entity implements LayerInterface { - public String showId; - public String facilityId; - public String jobId; + public String showId; + public String facilityId; + public String jobId; - public LayerEntity() { } + public LayerEntity() {} - public LayerEntity(String id) { - this.id = id; - } + public LayerEntity(String id) { + this.id = id; + } - public String getLayerId() { - return id; - } + public String getLayerId() { + return id; + } - public String getJobId() { - return jobId; - } + public String getJobId() { + return jobId; + } - @Override - public String getShowId() { - return showId; - } + @Override + public String getShowId() { + return showId; + } - @Override - public String getFacilityId() { - return facilityId; - } + @Override + public String getFacilityId() { + return facilityId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java b/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java index 414f42872..f53b1810e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface LayerInterface extends JobInterface { - public String getLayerId(); + public String getLayerId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java b/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java index 0679bb17e..3edb78e4a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.io.File; @@ -27,147 +23,142 @@ public class LayerStats { - private LayerDetail detail; - private FrameStateTotals frameStateTotals; - private ExecutionSummary executionSummary; - private List threadStats; - private List outputs; - - private String graphUnits; - private float conversionUnits; - private int scale; + private LayerDetail detail; + private FrameStateTotals frameStateTotals; + private ExecutionSummary executionSummary; + private List threadStats; + private List outputs; - public List getOutputs() { - return outputs; - } - - public void setOutputs(List outputs) { + private String graphUnits; + private float conversionUnits; + private int scale; - List newOutputs = new ArrayList(outputs.size()); - for (String output: outputs) { - newOutputs.add(new File(output).getParent() + "/*"); - } - this.outputs = newOutputs; - } + public List getOutputs() { + return outputs; + } - public List getThreadStats() { - return threadStats; - } - - public void setThreadStats(List threadStats) { - this.threadStats = threadStats; - setGraphScaleValues(); - } - - public LayerDetail getDetail() { - return detail; - } - - public void setDetail(LayerDetail detail) { - this.detail = detail; - } - - public FrameStateTotals getFrameStateTotals() { - return frameStateTotals; - } - - public void setFrameStateTotals(FrameStateTotals frameStateTotals) { - this.frameStateTotals = frameStateTotals; - } - - public ExecutionSummary getExecutionSummary() { - return executionSummary; - } - - public void setExecutionSummary(ExecutionSummary executionSummary) { - this.executionSummary = executionSummary; - } - - public int getGraphScale() { - return scale; - } - public String getGraphUnits() { - return graphUnits; - } - - public String getFormattedHighMemory() { - return String.format(Locale.ROOT, "%.1fGB", - executionSummary.highMemoryKb / 1024.0 / 1024.0); - } - - public String getFormattedProcHours() { - return String.format(Locale.ROOT, "%.1f", executionSummary.coreTime / 3600.0); - } - - public int getFailedFrames() { - return frameStateTotals.waiting + frameStateTotals.dead + frameStateTotals.eaten; - } - - public String getGraphLegend() { - StringBuilder sb = new StringBuilder(128); - List reversed = new ArrayList(threadStats); - Collections.reverse(reversed); - for(ThreadStats t: reversed) { - sb.append("|"); - sb.append(t.getThreads()); - sb.append("+"); - sb.append("Thread "); - } - return sb.toString(); - } - - public String getGraphData() { - - StringBuilder sb = new StringBuilder(128); - - for(ThreadStats t: threadStats) { - sb.append(String.format(Locale.ROOT, "%.2f", t.getAvgFrameTime() / conversionUnits)); - sb.append(","); - } - if (sb.length() > 1) { - sb.deleteCharAt(sb.length() - 
1); - } - return sb.toString(); - } - - public int getThreadAvgCount() { - return threadStats.size(); - } - - /** - * Since frame times vary wildly, anywhere from 1 second - * to 7 days, this method will set some values so - * average frame times are displayed in units that make - * them easy to compare. - * - * Based on the highest average frame time per thread group, - * average frame can be displayed in minutes, seconds, or hours. - * - */ - private void setGraphScaleValues() { - - int hightestAverageSec = 0; - for(ThreadStats t: threadStats) { - if (t.getAvgFrameTime() >= hightestAverageSec) { - hightestAverageSec = t.getAvgFrameTime(); - } - } - - if (hightestAverageSec < 60) { - graphUnits = "Seconds"; - scale = ((hightestAverageSec / 2 + 1) * 2); - conversionUnits = 1f; - } - else if (hightestAverageSec < 3600) { - graphUnits = "Minutes"; - scale = ((hightestAverageSec / 60) + 1); - conversionUnits = 60f; - } - else { - graphUnits = "Hours"; - scale = ((hightestAverageSec / 3600) + 1); - conversionUnits = 3600f; - } - } + public void setOutputs(List outputs) { + + List newOutputs = new ArrayList(outputs.size()); + for (String output : outputs) { + newOutputs.add(new File(output).getParent() + "/*"); + } + this.outputs = newOutputs; + } + + public List getThreadStats() { + return threadStats; + } + + public void setThreadStats(List threadStats) { + this.threadStats = threadStats; + setGraphScaleValues(); + } + + public LayerDetail getDetail() { + return detail; + } + + public void setDetail(LayerDetail detail) { + this.detail = detail; + } + + public FrameStateTotals getFrameStateTotals() { + return frameStateTotals; + } + + public void setFrameStateTotals(FrameStateTotals frameStateTotals) { + this.frameStateTotals = frameStateTotals; + } + + public ExecutionSummary getExecutionSummary() { + return executionSummary; + } + + public void setExecutionSummary(ExecutionSummary executionSummary) { + this.executionSummary = executionSummary; + } + + public int getGraphScale() { + return scale; + } + + public String getGraphUnits() { + return graphUnits; + } + + public String getFormattedHighMemory() { + return String.format(Locale.ROOT, "%.1fGB", executionSummary.highMemoryKb / 1024.0 / 1024.0); + } + + public String getFormattedProcHours() { + return String.format(Locale.ROOT, "%.1f", executionSummary.coreTime / 3600.0); + } + + public int getFailedFrames() { + return frameStateTotals.waiting + frameStateTotals.dead + frameStateTotals.eaten; + } + + public String getGraphLegend() { + StringBuilder sb = new StringBuilder(128); + List reversed = new ArrayList(threadStats); + Collections.reverse(reversed); + for (ThreadStats t : reversed) { + sb.append("|"); + sb.append(t.getThreads()); + sb.append("+"); + sb.append("Thread "); + } + return sb.toString(); + } + + public String getGraphData() { + + StringBuilder sb = new StringBuilder(128); + + for (ThreadStats t : threadStats) { + sb.append(String.format(Locale.ROOT, "%.2f", t.getAvgFrameTime() / conversionUnits)); + sb.append(","); + } + if (sb.length() > 1) { + sb.deleteCharAt(sb.length() - 1); + } + return sb.toString(); + } + + public int getThreadAvgCount() { + return threadStats.size(); + } + + /** + * Since frame times vary wildly, anywhere from 1 second to 7 days, this method will set some + * values so average frame times are displayed in units that make them easy to compare. + * + * Based on the highest average frame time per thread group, average frame can be displayed in + * minutes, seconds, or hours. 
+ * + */ + private void setGraphScaleValues() { + + int hightestAverageSec = 0; + for (ThreadStats t : threadStats) { + if (t.getAvgFrameTime() >= hightestAverageSec) { + hightestAverageSec = t.getAvgFrameTime(); + } + } + + if (hightestAverageSec < 60) { + graphUnits = "Seconds"; + scale = ((hightestAverageSec / 2 + 1) * 2); + conversionUnits = 1f; + } else if (hightestAverageSec < 3600) { + graphUnits = "Minutes"; + scale = ((hightestAverageSec / 60) + 1); + conversionUnits = 60f; + } else { + graphUnits = "Hours"; + scale = ((hightestAverageSec / 3600) + 1); + conversionUnits = 3600f; + } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java b/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java index 8f940fbed..87fda00d3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
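A quick illustration of the unit selection that setGraphScaleValues applies to the highest per-thread average frame time; the helper class below and its sample inputs are illustrative only and are not part of LayerStats:

public class GraphUnitsSketch {
    // Mirrors the thresholds in LayerStats.setGraphScaleValues():
    // under a minute -> Seconds, under an hour -> Minutes, otherwise Hours.
    static String describe(int highestAverageSec) {
        if (highestAverageSec < 60) {
            return "Seconds, scale " + ((highestAverageSec / 2 + 1) * 2);
        } else if (highestAverageSec < 3600) {
            return "Minutes, scale " + (highestAverageSec / 60 + 1);
        } else {
            return "Hours, scale " + (highestAverageSec / 3600 + 1);
        }
    }

    public static void main(String[] args) {
        System.out.println(describe(45));    // Seconds, scale 46
        System.out.println(describe(930));   // Minutes, scale 16
        System.out.println(describe(7300));  // Hours, scale 3
    }
}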
*/ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.depend.DependTarget; @@ -24,28 +20,27 @@ public class LightweightDependency extends Entity implements DependInterface { - public DependType type; - public DependTarget target; + public DependType type; + public DependTarget target; - public String parent = null; + public String parent = null; - public String dependErJobId; - public String dependErLayerId; - public String dependErFrameId; + public String dependErJobId; + public String dependErLayerId; + public String dependErFrameId; - public String dependOnJobId; - public String dependOnLayerId; - public String dependOnFrameId; + public String dependOnJobId; + public String dependOnLayerId; + public String dependOnFrameId; - public boolean anyFrame; - public boolean active; + public boolean anyFrame; + public boolean active; - public String getName() { - return type.toString() + "/" + dependErJobId; - } + public String getName() { + return type.toString() + "/" + dependErJobId; + } - public String toString() { - return String.format("%s/%s", type.toString(), getId()); - } + public String toString() { + return String.format("%s/%s", type.toString(), getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java b/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java index 6dee9db9b..894d5542d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java @@ -2,40 +2,36 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.limit.Limit; public class LimitEntity extends Entity implements LimitInterface { - public int maxValue; - public int currentRunning; + public int maxValue; + public int currentRunning; - public LimitEntity() {} + public LimitEntity() {} - public LimitEntity(Limit grpcLimit) { - this.id = grpcLimit.getId(); - this.name = grpcLimit.getName(); - this.maxValue = grpcLimit.getMaxValue(); - this.currentRunning = grpcLimit.getCurrentRunning(); - } + public LimitEntity(Limit grpcLimit) { + this.id = grpcLimit.getId(); + this.name = grpcLimit.getName(); + this.maxValue = grpcLimit.getMaxValue(); + this.currentRunning = grpcLimit.getCurrentRunning(); + } - public String getLimitId() { - return id; - } + public String getLimitId() { + return id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java b/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java index df21166e4..1f7a39f2d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java @@ -2,25 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface LimitInterface extends EntityInterface { - public String getLimitId(); + public String getLimitId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java b/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java index 65ce05c7e..9eee2340f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
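For context on the grpcLimit constructor above, a minimal sketch of mapping a gRPC Limit message into a LimitEntity; the builder calls assume the standard protobuf-java generated API for the Limit message, and the sample values are made up:

import com.imageworks.spcue.LimitEntity;
import com.imageworks.spcue.grpc.limit.Limit;

public class LimitEntitySketch {
    public static void main(String[] args) {
        // Assumed protobuf-generated builder; field names follow the getters
        // read by the LimitEntity constructor (id, name, max_value, current_running).
        Limit grpcLimit = Limit.newBuilder()
                .setId("00000000-0000-0000-0000-000000000000")
                .setName("example_license_limit")
                .setMaxValue(100)
                .setCurrentRunning(42)
                .build();

        LimitEntity entity = new LimitEntity(grpcLimit);
        System.out.println(entity.getLimitId() + " max=" + entity.maxValue);
    }
}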
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import com.imageworks.spcue.dispatcher.ResourceContainer; @@ -26,209 +22,205 @@ import org.apache.logging.log4j.LogManager; /** - * Contains information about local desktop cores a user has - * assigned to the given job. + * Contains information about local desktop cores a user has assigned to the given job. * - * The local-only option, if true, means the job will only dispatch - * a user's local cores. If false, the job will dispatch cores from - * both the user's machine and the render farm. + * The local-only option, if true, means the job will only dispatch a user's local cores. If false, + * the job will dispatch cores from both the user's machine and the render farm. */ -public class LocalHostAssignment extends Entity - implements ResourceContainer { - - private static final Logger logger = LogManager.getLogger(LocalHostAssignment.class); - - private int idleCoreUnits; - private long idleMemory; - private int idleGpuUnits; - private long idleGpuMemory; - - private long maxMemory; - private long maxGpuMemory; - private int maxCoreUnits; - private int maxGpuUnits; - - private int threads; - - private String hostId; - private String jobId = null; - private String layerId = null; - private String frameId = null; - - private RenderPartitionType type; - - public LocalHostAssignment() { } - - public LocalHostAssignment(int maxCores, int threads, long maxMemory, int maxGpus, long maxGpuMemory) { - this.maxCoreUnits = maxCores; - this.threads = threads; - this.maxMemory = maxMemory; - this.maxGpuUnits = maxGpus; - this.maxGpuMemory = maxGpuMemory; - } - - public int handleNegativeCoresRequirement(int requestedCores) { - // If we request a <=0 amount of cores, return positive core count. - // Request -2 on a 24 core machine will return 22. - - if (requestedCores > 0) { - // Do not process positive core requests. - logger.debug("Requested " + requestedCores + " cores."); - return requestedCores; - } - if (requestedCores <=0 && idleCoreUnits < threads) { - // If request is negative but cores are already used, return 0. - // We don't want to overbook the host. 
- logger.debug("Requested " + requestedCores + " cores, but the host is busy and cannot book more jobs."); - return 0; - } - // Book all cores minus the request - int totalCores = idleCoreUnits + requestedCores; - logger.debug("Requested " + requestedCores + " cores <= 0, " + - idleCoreUnits + " cores are free, booking " + totalCores + " cores"); - return totalCores; - } - - @Override - public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, long minGpuMemory) { - minCores = handleNegativeCoresRequirement(minCores); - if (idleCoreUnits < minCores) { - return false; - } - if (minCores <= 0) { - return false; - } - else if (idleMemory < minMemory) { - return false; - } - else if (idleGpuUnits < minGpus) { - return false; - } - else if (idleGpuMemory < minGpuMemory) { - return false; - } - - return true; - } - - @Override - public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { - idleCoreUnits = idleCoreUnits - coreUnits; - idleMemory = idleMemory - memory; - idleGpuUnits = idleGpuUnits - gpuUnits; - idleGpuMemory = idleGpuMemory - gpuMemory; - } - - public int getThreads() { - return threads; - } - - public void setThreads(int threads) { - this.threads = threads; - } - - public long getMaxMemory() { - return maxMemory; - } - - public void setMaxMemory(long maxMemory) { - this.maxMemory = maxMemory; - } - - public int getMaxCoreUnits() { - return maxCoreUnits; - } - - public void setMaxCoreUnits(int maxCoreUnits) { - this.maxCoreUnits = maxCoreUnits; - } - - public long getIdleMemory() { - return this.idleMemory; - } - - public int getMaxGpuUnits() { - return maxGpuUnits; - } - - public void setMaxGpuUnits(int maxGpuUnits) { - this.maxGpuUnits = maxGpuUnits; - } +public class LocalHostAssignment extends Entity implements ResourceContainer { + + private static final Logger logger = LogManager.getLogger(LocalHostAssignment.class); + + private int idleCoreUnits; + private long idleMemory; + private int idleGpuUnits; + private long idleGpuMemory; + + private long maxMemory; + private long maxGpuMemory; + private int maxCoreUnits; + private int maxGpuUnits; + + private int threads; + + private String hostId; + private String jobId = null; + private String layerId = null; + private String frameId = null; + + private RenderPartitionType type; + + public LocalHostAssignment() {} + + public LocalHostAssignment(int maxCores, int threads, long maxMemory, int maxGpus, + long maxGpuMemory) { + this.maxCoreUnits = maxCores; + this.threads = threads; + this.maxMemory = maxMemory; + this.maxGpuUnits = maxGpus; + this.maxGpuMemory = maxGpuMemory; + } + + public int handleNegativeCoresRequirement(int requestedCores) { + // If we request a <=0 amount of cores, return positive core count. + // Request -2 on a 24 core machine will return 22. + + if (requestedCores > 0) { + // Do not process positive core requests. + logger.debug("Requested " + requestedCores + " cores."); + return requestedCores; + } + if (requestedCores <= 0 && idleCoreUnits < threads) { + // If request is negative but cores are already used, return 0. + // We don't want to overbook the host. 
+ logger.debug("Requested " + requestedCores + + " cores, but the host is busy and cannot book more jobs."); + return 0; + } + // Book all cores minus the request + int totalCores = idleCoreUnits + requestedCores; + logger.debug("Requested " + requestedCores + " cores <= 0, " + idleCoreUnits + + " cores are free, booking " + totalCores + " cores"); + return totalCores; + } + + @Override + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, + long minGpuMemory) { + minCores = handleNegativeCoresRequirement(minCores); + if (idleCoreUnits < minCores) { + return false; + } + if (minCores <= 0) { + return false; + } else if (idleMemory < minMemory) { + return false; + } else if (idleGpuUnits < minGpus) { + return false; + } else if (idleGpuMemory < minGpuMemory) { + return false; + } + + return true; + } + + @Override + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { + idleCoreUnits = idleCoreUnits - coreUnits; + idleMemory = idleMemory - memory; + idleGpuUnits = idleGpuUnits - gpuUnits; + idleGpuMemory = idleGpuMemory - gpuMemory; + } + + public int getThreads() { + return threads; + } + + public void setThreads(int threads) { + this.threads = threads; + } + + public long getMaxMemory() { + return maxMemory; + } + + public void setMaxMemory(long maxMemory) { + this.maxMemory = maxMemory; + } + + public int getMaxCoreUnits() { + return maxCoreUnits; + } + + public void setMaxCoreUnits(int maxCoreUnits) { + this.maxCoreUnits = maxCoreUnits; + } + + public long getIdleMemory() { + return this.idleMemory; + } + + public int getMaxGpuUnits() { + return maxGpuUnits; + } + + public void setMaxGpuUnits(int maxGpuUnits) { + this.maxGpuUnits = maxGpuUnits; + } + + public long getMaxGpuMemory() { + return maxGpuMemory; + } + + public void setMaxGpuMemory(long maxGpuMemory) { + this.maxGpuMemory = maxGpuMemory; + } + + public long getIdleGpuMemory() { + return this.idleGpuMemory; + } + + public int getIdleCoreUnits() { + return this.idleCoreUnits; + } + + public void setIdleCoreUnits(int idleCoreUnits) { + this.idleCoreUnits = idleCoreUnits; + } + + public void setIdleMemory(long idleMemory) { + this.idleMemory = idleMemory; + } + + public int getIdleGpuUnits() { + return this.idleGpuUnits; + } + + public void setIdleGpuUnits(int idleGpuUnits) { + this.idleGpuUnits = idleGpuUnits; + } + + public void setIdleGpuMemory(long idleGpuMemory) { + this.idleGpuMemory = idleGpuMemory; + } + + public String getHostId() { + return hostId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { + this.jobId = jobId; + } - public long getMaxGpuMemory() { - return maxGpuMemory; - } - - public void setMaxGpuMemory(long maxGpuMemory) { - this.maxGpuMemory = maxGpuMemory; - } - - public long getIdleGpuMemory() { - return this.idleGpuMemory; - } - - public int getIdleCoreUnits() { - return this.idleCoreUnits; - } - - public void setIdleCoreUnits(int idleCoreUnits) { - this.idleCoreUnits = idleCoreUnits; - } + public String getLayerId() { + return layerId; + } + + public void setLayerId(String layerId) { + this.layerId = layerId; + } + + public String getFrameId() { + return frameId; + } - public void setIdleMemory(long idleMemory) { - this.idleMemory = idleMemory; - } + public void setFrameId(String frameId) { + this.frameId = frameId; + } - public int getIdleGpuUnits() { - return this.idleGpuUnits; - } + public RenderPartitionType getType() { + 
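A short usage sketch of the negative-cores convention handled above; the 24-core host, the thread count of 1, and the wrapper class are assumptions made for illustration:

import com.imageworks.spcue.LocalHostAssignment;

public class NegativeCoresSketch {
    public static void main(String[] args) {
        // A local assignment with 24 idle core units and a 1-thread minimum.
        LocalHostAssignment lha = new LocalHostAssignment();
        lha.setThreads(1);
        lha.setIdleCoreUnits(24);

        System.out.println(lha.handleNegativeCoresRequirement(8));   // positive: passes through -> 8
        System.out.println(lha.handleNegativeCoresRequirement(-2));  // all idle cores minus two -> 22
        System.out.println(lha.handleNegativeCoresRequirement(0));   // all idle cores -> 24

        // Once idle cores drop below the thread minimum, negative or zero
        // requests book nothing rather than overbooking the host.
        lha.setIdleCoreUnits(0);
        System.out.println(lha.handleNegativeCoresRequirement(-2));  // -> 0
    }
}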
return type; + } - public void setIdleGpuUnits(int idleGpuUnits) { - this.idleGpuUnits = idleGpuUnits; - } - - public void setIdleGpuMemory(long idleGpuMemory) { - this.idleGpuMemory = idleGpuMemory; - } - - public String getHostId() { - return hostId; - } - - public void setHostId(String hostId) { - this.hostId = hostId; - } - - public String getJobId() { - return jobId; - } - - public void setJobId(String jobId) { - this.jobId = jobId; - } - - public String getLayerId() { - return layerId; - } - - public void setLayerId(String layerId) { - this.layerId = layerId; - } - - public String getFrameId() { - return frameId; - } - - public void setFrameId(String frameId) { - this.frameId = frameId; - } - - public RenderPartitionType getType() { - return type; - } - - public void setType(RenderPartitionType type) { - this.type = type; - } + public void setType(RenderPartitionType type) { + this.type = type; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java b/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java index 479080ac7..f46378c8f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java @@ -2,53 +2,47 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; /** - * MaintenanceTasks are stored in the task_lock table. Before - * a maintenance operation kicks off a lock must be taken out on it - * so multiple bots don't run the same task. + * MaintenanceTasks are stored in the task_lock table. Before a maintenance operation kicks off a + * lock must be taken out on it so multiple bots don't run the same task. */ public enum MaintenanceTask { - /** - * Lock the transfer of jobs to the historical table - */ - LOCK_HISTORICAL_TRANSFER, - - /** - * Lock the hardware start check - */ - LOCK_HARDWARE_STATE_CHECK, - - /** - * Lock the orphaned proc check - */ - LOCK_ORPHANED_PROC_CHECK, - - /** - * Lock for task updates - */ - LOCK_TASK_UPDATE, - - /** - * Lock the stale checkpoint task. 
- */ - LOCK_STALE_CHECKPOINT + /** + * Lock the transfer of jobs to the historical table + */ + LOCK_HISTORICAL_TRANSFER, + + /** + * Lock the hardware start check + */ + LOCK_HARDWARE_STATE_CHECK, + + /** + * Lock the orphaned proc check + */ + LOCK_ORPHANED_PROC_CHECK, + + /** + * Lock for task updates + */ + LOCK_TASK_UPDATE, + + /** + * Lock the stale checkpoint task. + */ + LOCK_STALE_CHECKPOINT } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java b/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java index 157a5ef9b..f8072e090 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
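To make the locking contract above concrete, a hypothetical sketch of gating one maintenance pass on a task lock; TaskLock is a stand-in interface defined here for the example, not the real MaintenanceDao API:

import com.imageworks.spcue.MaintenanceTask;

public class MaintenanceLockSketch {
    // Stand-in for whatever component manages rows in the task_lock table.
    interface TaskLock {
        boolean lock(MaintenanceTask task);
        void unlock(MaintenanceTask task);
    }

    static void runHardwareStateCheck(TaskLock locks) {
        // Only one cuebot instance should win the lock and run the check.
        if (!locks.lock(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK)) {
            return;
        }
        try {
            // ... perform the hardware state check here ...
        } finally {
            locks.unlock(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK);
        }
    }
}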
*/ - - package com.imageworks.spcue; import com.imageworks.spcue.grpc.filter.MatchSubject; @@ -25,40 +21,39 @@ public class MatcherEntity extends Entity implements MatcherInterface { - public MatchSubject subject; - public MatchType type; - public String value; + public MatchSubject subject; + public MatchType type; + public String value; - public String filterId; - public String showId; + public String filterId; + public String showId; - public static MatcherEntity build(FilterInterface filter, Matcher data) { + public static MatcherEntity build(FilterInterface filter, Matcher data) { - MatcherEntity detail = new MatcherEntity(); - detail.name = null; - detail.subject = data.getSubject(); - detail.type = data.getType(); - detail.value = data.getInput(); + MatcherEntity detail = new MatcherEntity(); + detail.name = null; + detail.subject = data.getSubject(); + detail.type = data.getType(); + detail.value = data.getInput(); - return detail; - } + return detail; + } - public static MatcherEntity build(FilterInterface filter, Matcher data, String id) { - MatcherEntity detail = build(filter, data); - detail.id = id.toString(); - return detail; - } + public static MatcherEntity build(FilterInterface filter, Matcher data, String id) { + MatcherEntity detail = build(filter, data); + detail.id = id.toString(); + return detail; + } - public String getFilterId() { - return filterId; - } + public String getFilterId() { + return filterId; + } - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } - public String getMatcherId() { - return this.id; - } + public String getMatcherId() { + return this.id; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java b/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java index ae17ae686..a1d0c042f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public interface MatcherInterface extends FilterInterface { - public String getMatcherId(); + public String getMatcherId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java b/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java index c8f872451..dd6fb7399 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java @@ -2,50 +2,45 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class MinimalHost extends Entity implements HostInterface { - public String allocId; - public String facilityId; + public String allocId; + public String facilityId; - public MinimalHost() { - // TODO Auto-generated constructor stub - } + public MinimalHost() { + // TODO Auto-generated constructor stub + } - public String getHostId() { - return this.id; - } + public String getHostId() { + return this.id; + } - public String getAllocationId() { - return allocId; - } + public String getAllocationId() { + return allocId; + } - public String getId() { - return id; - } + public String getId() { + return id; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public String getFacilityId() { - return facilityId; - } + public String getFacilityId() { + return facilityId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java b/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java index 1ebea5ff0..9e2434e5c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java @@ -2,30 +2,25 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class OwnerEntity extends Entity { - public OwnerEntity() {} + public OwnerEntity() {} - public OwnerEntity(String name) { - this.name = name; - } + public OwnerEntity(String name) { + this.name = name; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java b/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java index b99dfdd1a..01efc9716 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java @@ -2,45 +2,40 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class PointDetail extends Entity implements PointInterface { - public String showId; - public String deptId; + public String showId; + public String deptId; - public int cores = 0; - public String tiTask = ""; - public boolean tiManaged = false; + public int cores = 0; + public String tiTask = ""; + public boolean tiManaged = false; - @Override - public String getDepartmentId() { - return deptId; - } + @Override + public String getDepartmentId() { + return deptId; + } - @Override - public String getShowId() { - return showId; - } + @Override + public String getShowId() { + return showId; + } - @Override - public String getPointId() { - return id; - } + @Override + public String getPointId() { + return id; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java b/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java index 7290a9d18..f1bce6e61 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface PointInterface extends DepartmentInterface, ShowInterface { - public String getPointId(); + public String getPointId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java b/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java index f1f32c037..cbc45a8dd 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java @@ -2,26 +2,21 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; public interface ProcInterface extends HostInterface, FrameInterface { - String getProcId(); + String getProcId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java b/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java index 60e403fe6..624b2c93c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java +++ b/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java @@ -18,310 +18,260 @@ */ @Component public class PrometheusMetricsCollector { - private BookingQueue bookingQueue; - - private DispatchQueue manageQueue; - - private DispatchQueue dispatchQueue; - - private HostReportQueue reportQueue; - - private boolean enabled; - - // BookingQueue bookingQueue - private static final Gauge bookingWaitingTotal = Gauge.build() - .name("cue_booking_waiting_total") - .help("Booking Queue number of waiting tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge bookingRemainingCapacityTotal = Gauge.build() - .name("cue_booking_remaining_capacity_total") - .help("Booking Queue remaining capacity") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge bookingThreadsTotal = Gauge.build() - .name("cue_booking_threads_total") - .help("Booking Queue number of active threads") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge bookingExecutedTotal = Gauge.build() - .name("cue_booking_executed_total") - .help("Booking Queue number of executed tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge bookingRejectedTotal = Gauge.build() - .name("cue_booking_rejected_total") - .help("Booking Queue number of rejected tasks") - .labelNames("env", "cuebot_hosts") - .register(); - - // DispatchQueue manageQueue - private static final Gauge manageWaitingTotal = Gauge.build() - .name("cue_manage_waiting_total") - .help("Manage Queue number of waiting tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge manageRemainingCapacityTotal = Gauge.build() - .name("cue_manage_remaining_capacity_total") - .help("Manage Queue remaining capacity") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge manageThreadsTotal = Gauge.build() - .name("cue_manage_threads_total") - .help("Manage Queue number of active threads") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge manageExecutedTotal = Gauge.build() - .name("cue_manage_executed_total") - .help("Manage Queue number of executed tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge manageRejectedTotal = Gauge.build() - .name("cue_manage_rejected_total") - .help("Manage Queue number of rejected tasks") - .labelNames("env", "cuebot_hosts") - .register(); - - // DispatchQueue dispatchQueue - private static final Gauge dispatchWaitingTotal = Gauge.build() - .name("cue_dispatch_waiting_total") - .help("Dispatch Queue number of waiting tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge dispatchRemainingCapacityTotal = Gauge.build() - .name("cue_dispatch_remaining_capacity_total") - .help("Dispatch Queue remaining capacity") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge dispatchThreadsTotal = Gauge.build() - .name("cue_dispatch_threads_total") - .help("Dispatch Queue number of active threads") - .labelNames("env", 
"cuebot_hosts") - .register(); - private static final Gauge dispatchExecutedTotal = Gauge.build() - .name("cue_dispatch_executed_total") - .help("Dispatch Queue number of executed tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge dispatchRejectedTotal = Gauge.build() - .name("cue_dispatch_rejected_total") - .help("Dispatch Queue number of rejected tasks") - .labelNames("env", "cuebot_hosts") - .register(); - - // HostReportQueue reportQueue - private static final Gauge reportQueueWaitingTotal = Gauge.build() - .name("cue_report_waiting_total") - .help("Report Queue number of waiting tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge reportQueueRemainingCapacityTotal = Gauge.build() - .name("cue_report_remaining_capacity_total") - .help("Report Queue remaining capacity") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge reportQueueThreadsTotal = Gauge.build() - .name("cue_report_threads_total") - .help("Report Queue number of active threads") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge reportQueueExecutedTotal = Gauge.build() - .name("cue_report_executed_total") - .help("Report Queue number of executed tasks") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge reportQueueRejectedTotal = Gauge.build() - .name("cue_report_rejected_total") - .help("Report Queue number of rejected tasks") - .labelNames("env", "cuebot_hosts") - .register(); - - private static final Counter findJobsByShowQueryCountMetric = Counter.build() - .name("cue_find_jobs_by_show_count") - .help("Count the occurrences of the query FIND_JOBS_BY_SHOW.") - .labelNames("env", "cuebot_hosts") - .register(); - private static final Gauge bookingDurationMillisMetric = Gauge.build() - .name("cue_booking_durations_in_millis") - .help("Register duration of booking steps in milliseconds.") - .labelNames("env", "cuebot_host", "stage_desc") - .register(); - private static final Histogram bookingDurationMillisHistogramMetric = Histogram.build() - .name("cue_booking_durations_histogram_in_millis") - .help("Register a summary of duration of booking steps in milliseconds.") - .labelNames("env", "cuebot_host", "stage_desc") - .register(); - - private static final Counter frameKilledCounter = Counter.build() - .name("cue_frame_killed_counter") - .help("Number of frames kill requests processed") - .labelNames("env", "cuebot_host", "render_node", "cause") - .register(); - - private static final Counter frameKillFailureCounter = Counter.build() - .name("cue_frame_kill_failure_counter") - .help("Number of frames that failed to be killed after FRAME_KILL_RETRY_LIMIT tries") - .labelNames("env", "cuebot_host", "render_node", "job_name", "frame_name", "frame_id") - .register(); - - private String deployment_environment; - private String cuebot_host; - - @Autowired - public PrometheusMetricsCollector(Environment env) { - if (env == null) { - throw new SpcueRuntimeException("Env not defined"); - } - this.enabled = env.getProperty("metrics.prometheus.collector", Boolean.class, false); - String envKey = env.getProperty("metrics.prometheus.environment_id.environment_variable", String.class, - "DEPLOYMENT_ENVIRONMENT"); - - this.cuebot_host = getHostNameFromEnv(); - // Get environment id from environment variable - this.deployment_environment = System.getenv(envKey); - if (this.deployment_environment == null) { - this.deployment_environment = "undefined"; - } + private BookingQueue 
bookingQueue; + + private DispatchQueue manageQueue; + + private DispatchQueue dispatchQueue; + + private HostReportQueue reportQueue; + + private boolean enabled; + + // BookingQueue bookingQueue + private static final Gauge bookingWaitingTotal = Gauge.build().name("cue_booking_waiting_total") + .help("Booking Queue number of waiting tasks").labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingRemainingCapacityTotal = + Gauge.build().name("cue_booking_remaining_capacity_total") + .help("Booking Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingThreadsTotal = Gauge.build().name("cue_booking_threads_total") + .help("Booking Queue number of active threads").labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingExecutedTotal = Gauge.build().name("cue_booking_executed_total") + .help("Booking Queue number of executed tasks").labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingRejectedTotal = Gauge.build().name("cue_booking_rejected_total") + .help("Booking Queue number of rejected tasks").labelNames("env", "cuebot_hosts").register(); + + // DispatchQueue manageQueue + private static final Gauge manageWaitingTotal = Gauge.build().name("cue_manage_waiting_total") + .help("Manage Queue number of waiting tasks").labelNames("env", "cuebot_hosts").register(); + private static final Gauge manageRemainingCapacityTotal = + Gauge.build().name("cue_manage_remaining_capacity_total") + .help("Manage Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); + private static final Gauge manageThreadsTotal = Gauge.build().name("cue_manage_threads_total") + .help("Manage Queue number of active threads").labelNames("env", "cuebot_hosts").register(); + private static final Gauge manageExecutedTotal = Gauge.build().name("cue_manage_executed_total") + .help("Manage Queue number of executed tasks").labelNames("env", "cuebot_hosts").register(); + private static final Gauge manageRejectedTotal = Gauge.build().name("cue_manage_rejected_total") + .help("Manage Queue number of rejected tasks").labelNames("env", "cuebot_hosts").register(); + + // DispatchQueue dispatchQueue + private static final Gauge dispatchWaitingTotal = Gauge.build().name("cue_dispatch_waiting_total") + .help("Dispatch Queue number of waiting tasks").labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchRemainingCapacityTotal = + Gauge.build().name("cue_dispatch_remaining_capacity_total") + .help("Dispatch Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchThreadsTotal = Gauge.build().name("cue_dispatch_threads_total") + .help("Dispatch Queue number of active threads").labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchExecutedTotal = Gauge.build() + .name("cue_dispatch_executed_total").help("Dispatch Queue number of executed tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchRejectedTotal = Gauge.build() + .name("cue_dispatch_rejected_total").help("Dispatch Queue number of rejected tasks") + .labelNames("env", "cuebot_hosts").register(); + + // HostReportQueue reportQueue + private static final Gauge reportQueueWaitingTotal = + Gauge.build().name("cue_report_waiting_total").help("Report Queue number of waiting tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueRemainingCapacityTotal = 
+ Gauge.build().name("cue_report_remaining_capacity_total") + .help("Report Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueThreadsTotal = + Gauge.build().name("cue_report_threads_total").help("Report Queue number of active threads") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueExecutedTotal = + Gauge.build().name("cue_report_executed_total").help("Report Queue number of executed tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueRejectedTotal = + Gauge.build().name("cue_report_rejected_total").help("Report Queue number of rejected tasks") + .labelNames("env", "cuebot_hosts").register(); + + private static final Counter findJobsByShowQueryCountMetric = + Counter.build().name("cue_find_jobs_by_show_count") + .help("Count the occurrences of the query FIND_JOBS_BY_SHOW.") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingDurationMillisMetric = + Gauge.build().name("cue_booking_durations_in_millis") + .help("Register duration of booking steps in milliseconds.") + .labelNames("env", "cuebot_host", "stage_desc").register(); + private static final Histogram bookingDurationMillisHistogramMetric = + Histogram.build().name("cue_booking_durations_histogram_in_millis") + .help("Register a summary of duration of booking steps in milliseconds.") + .labelNames("env", "cuebot_host", "stage_desc").register(); + + private static final Counter frameKilledCounter = Counter.build().name("cue_frame_killed_counter") + .help("Number of frames kill requests processed") + .labelNames("env", "cuebot_host", "render_node", "cause").register(); + + private static final Counter frameKillFailureCounter = + Counter.build().name("cue_frame_kill_failure_counter") + .help("Number of frames that failed to be killed after FRAME_KILL_RETRY_LIMIT tries") + .labelNames("env", "cuebot_host", "render_node", "job_name", "frame_name", "frame_id") + .register(); + + private String deployment_environment; + private String cuebot_host; + + @Autowired + public PrometheusMetricsCollector(Environment env) { + if (env == null) { + throw new SpcueRuntimeException("Env not defined"); } - - /** - * Get hostname from environment variable - * - * Uses the following fallback order: - * - * - NODE_HOSTNAME -> HOSTNAME -> HOST -> "undefined" - * - * @return - */ - private String getHostNameFromEnv() { - String hostname = System.getenv("NODE_HOSTNAME"); - if (hostname != null) { - return hostname; - } - - hostname = System.getenv("HOSTNAME"); - if (hostname != null) { - return hostname; - } - - hostname = System.getenv("HOST"); - if (hostname != null) { - return hostname; - } - - return "undefined"; - } - - /** - * Collect metrics from queues - */ - public void collectPrometheusMetrics() { - if (this.enabled) { - // BookingQueue bookingQueue - bookingWaitingTotal.labels(this.deployment_environment, this.cuebot_host).set(bookingQueue.getSize()); - bookingRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getRemainingCapacity()); - bookingThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getActiveCount()); - bookingExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getCompletedTaskCount()); - bookingRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getRejectedTaskCount()); - - // DispatchQueue manageQueue - 
manageWaitingTotal.labels(this.deployment_environment, this.cuebot_host).set(manageQueue.getSize()); - manageRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getRemainingCapacity()); - manageThreadsTotal.labels(this.deployment_environment, this.cuebot_host).set(manageQueue.getActiveCount()); - manageExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getCompletedTaskCount()); - manageRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getRejectedTaskCount()); - - // DispatchQueue dispatchQueue - dispatchWaitingTotal.labels(this.deployment_environment, this.cuebot_host).set(dispatchQueue.getSize()); - dispatchRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getRemainingCapacity()); - dispatchThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getActiveCount()); - dispatchExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getCompletedTaskCount()); - dispatchRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getRejectedTaskCount()); - - // HostReportQueue reportQueue - reportQueueWaitingTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getQueue().size()); - reportQueueRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getQueue().remainingCapacity()); - reportQueueThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getActiveCount()); - reportQueueExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getTaskCount()); - reportQueueRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getRejectedTaskCount()); - } - } - - /** - * Set a new value to the cue_booking_durations_in_millis metric - * - * @param stage_desc booking stage description to be used as a tag - * @param value value to set - */ - public void setBookingDurationMetric(String stage_desc, double value) { - bookingDurationMillisMetric.labels(this.deployment_environment, this.cuebot_host, stage_desc).set(value); - bookingDurationMillisHistogramMetric.labels(this.deployment_environment, this.cuebot_host, stage_desc).observe(value); - } - - /** - * Increment cue_find_jobs_by_show_count metric - */ - public void incrementFindJobsByShowQueryCountMetric() { - findJobsByShowQueryCountMetric.labels(this.deployment_environment, this.cuebot_host).inc(); - } - - /** - * Increment cue_frame_killed_counter metric - * - * @param renderNode hostname of the render node receiving the kill request - * @param killCause cause assigned to the request - */ - public void incrementFrameKilledCounter(String renderNode, HostReportHandler.KillCause killCause) { - frameKilledCounter.labels(this.deployment_environment, this.cuebot_host, renderNode, killCause.name()).inc(); + this.enabled = env.getProperty("metrics.prometheus.collector", Boolean.class, false); + String envKey = env.getProperty("metrics.prometheus.environment_id.environment_variable", + String.class, "DEPLOYMENT_ENVIRONMENT"); + + this.cuebot_host = getHostNameFromEnv(); + // Get environment id from environment variable + this.deployment_environment = System.getenv(envKey); + if (this.deployment_environment == null) { + this.deployment_environment = "undefined"; } - - /** - * Increment cue_frame_kill_failure_counter metric - * - * @param hostname - * 
@param jobName - * @param frameName - * @param frameId - */ - public void incrementFrameKillFailureCounter(String hostname, String jobName, String frameName, String frameId) { - frameKillFailureCounter.labels(this.deployment_environment, - this.cuebot_host, - hostname, - jobName, - frameName, - frameId).inc(); - } - - // Setters used for dependency injection - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; + } + + /** + * Get hostname from environment variable + * + * Uses the following fallback order: + * + * - NODE_HOSTNAME -> HOSTNAME -> HOST -> "undefined" + * + * @return + */ + private String getHostNameFromEnv() { + String hostname = System.getenv("NODE_HOSTNAME"); + if (hostname != null) { + return hostname; } - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; + hostname = System.getenv("HOSTNAME"); + if (hostname != null) { + return hostname; } - public void setDispatchQueue(DispatchQueue dispatchQueue) { - this.dispatchQueue = dispatchQueue; + hostname = System.getenv("HOST"); + if (hostname != null) { + return hostname; } - public void setReportQueue(HostReportQueue reportQueue) { - this.reportQueue = reportQueue; + return "undefined"; + } + + /** + * Collect metrics from queues + */ + public void collectPrometheusMetrics() { + if (this.enabled) { + // BookingQueue bookingQueue + bookingWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getSize()); + bookingRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getRemainingCapacity()); + bookingThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getActiveCount()); + bookingExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getCompletedTaskCount()); + bookingRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getRejectedTaskCount()); + + // DispatchQueue manageQueue + manageWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getSize()); + manageRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getRemainingCapacity()); + manageThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getActiveCount()); + manageExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getCompletedTaskCount()); + manageRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getRejectedTaskCount()); + + // DispatchQueue dispatchQueue + dispatchWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getSize()); + dispatchRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getRemainingCapacity()); + dispatchThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getActiveCount()); + dispatchExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getCompletedTaskCount()); + dispatchRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getRejectedTaskCount()); + + // HostReportQueue reportQueue + reportQueueWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getQueue().size()); + reportQueueRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + 
.set(reportQueue.getQueue().remainingCapacity()); + reportQueueThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getActiveCount()); + reportQueueExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getTaskCount()); + reportQueueRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getRejectedTaskCount()); } -} \ No newline at end of file + } + + /** + * Set a new value to the cue_booking_durations_in_millis metric + * + * @param stage_desc booking stage description to be used as a tag + * @param value value to set + */ + public void setBookingDurationMetric(String stage_desc, double value) { + bookingDurationMillisMetric.labels(this.deployment_environment, this.cuebot_host, stage_desc) + .set(value); + bookingDurationMillisHistogramMetric + .labels(this.deployment_environment, this.cuebot_host, stage_desc).observe(value); + } + + /** + * Increment cue_find_jobs_by_show_count metric + */ + public void incrementFindJobsByShowQueryCountMetric() { + findJobsByShowQueryCountMetric.labels(this.deployment_environment, this.cuebot_host).inc(); + } + + /** + * Increment cue_frame_killed_counter metric + * + * @param renderNode hostname of the render node receiving the kill request + * @param killCause cause assigned to the request + */ + public void incrementFrameKilledCounter(String renderNode, + HostReportHandler.KillCause killCause) { + frameKilledCounter + .labels(this.deployment_environment, this.cuebot_host, renderNode, killCause.name()).inc(); + } + + /** + * Increment cue_frame_kill_failure_counter metric + * + * @param hostname + * @param jobName + * @param frameName + * @param frameId + */ + public void incrementFrameKillFailureCounter(String hostname, String jobName, String frameName, + String frameId) { + frameKillFailureCounter.labels(this.deployment_environment, this.cuebot_host, hostname, jobName, + frameName, frameId).inc(); + } + + // Setters used for dependency injection + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public void setDispatchQueue(DispatchQueue dispatchQueue) { + this.dispatchQueue = dispatchQueue; + } + + public void setReportQueue(HostReportQueue reportQueue) { + this.reportQueue = reportQueue; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/Redirect.java b/cuebot/src/main/java/com/imageworks/spcue/Redirect.java index a209e3cb1..2ece95a58 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Redirect.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Redirect.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
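// Illustrative sketch only (not part of the patch): one way the PrometheusMetricsCollector
// reformatted above might be wired and polled. The Spring Environment and the four queue beans
// are assumed to exist elsewhere in cuebot, and the queue import paths are assumptions; only the
// constructor, setters and collectPrometheusMetrics() are taken from the code shown here.
import org.springframework.core.env.Environment;
import com.imageworks.spcue.PrometheusMetricsCollector;
import com.imageworks.spcue.dispatcher.BookingQueue;
import com.imageworks.spcue.dispatcher.DispatchQueue;
import com.imageworks.spcue.dispatcher.HostReportQueue;

public class MetricsPollerSketch {
    public static PrometheusMetricsCollector wire(Environment env, BookingQueue booking,
            DispatchQueue manage, DispatchQueue dispatch, HostReportQueue report) {
        // Collection is only enabled when metrics.prometheus.collector=true in the properties.
        PrometheusMetricsCollector collector = new PrometheusMetricsCollector(env);
        collector.setBookingQueue(booking);
        collector.setManageQueue(manage);
        collector.setDispatchQueue(dispatch);
        collector.setReportQueue(report);
        return collector;
    }

    public static void poll(PrometheusMetricsCollector collector) {
        // Pushes current queue sizes, capacities and task counts into the registered gauges;
        // a no-op when the collector is disabled.
        collector.collectPrometheusMetrics();
    }
}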
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.io.Serializable; @@ -26,88 +22,85 @@ import com.imageworks.spcue.util.SqlUtil; /** - * A Redirect contains the new destination for a proc. The destination type may - * be a job or a group. + * A Redirect contains the new destination for a proc. The destination type may be a job or a group. */ public class Redirect implements Serializable { - private static final long serialVersionUID = -6461503320817105280L; - - /** - * Track requests to redirect multiple procs together - * by assigning a group id. - * */ - private final String groupId; - private final RedirectType type; - private final String destinationId; - private final String name; - private final long creationTime; - - public static final long EXPIRE_TIME = - TimeUnit.MILLISECONDS.convert(24, TimeUnit.HOURS); - - public Redirect(String groupId, RedirectType type, String destinationId, String name, long creationTime) { - this.groupId = groupId; - this.type = type; - this.destinationId = destinationId; - this.name = name; - this.creationTime = creationTime; - } - - public Redirect(RedirectType type, String destinationId, String name) { - this.groupId = SqlUtil.genKeyRandom(); - this.type = type; - this.destinationId = destinationId; - this.name = name; - this.creationTime = System.currentTimeMillis(); - } - - public Redirect(String groupId, JobInterface job) { - this.groupId = groupId; - this.type = RedirectType.JOB_REDIRECT; - this.destinationId = job.getJobId(); - this.name = job.getName(); - this.creationTime = System.currentTimeMillis(); - } - - public Redirect(JobInterface job) { - this.groupId = SqlUtil.genKeyRandom(); - this.type = RedirectType.JOB_REDIRECT; - this.destinationId = job.getJobId(); - this.name = job.getName(); - this.creationTime = System.currentTimeMillis(); - } - - public Redirect(GroupInterface group) { - this.groupId = SqlUtil.genKeyRandom(); - this.type = RedirectType.GROUP_REDIRECT; - this.destinationId = group.getGroupId(); - this.name = group.getName(); - this.creationTime = System.currentTimeMillis(); - } - - public String getGroupId() { - return groupId; - } - - public RedirectType getType() { - return type; - } - - public String getDestinationId() { - return destinationId; - } - - public String getDestinationName() { - return name; - } - - public boolean isExpired() { - return System.currentTimeMillis() - creationTime >= EXPIRE_TIME; - } - - public long getCreationTime() { - return creationTime; - } + private static final long serialVersionUID = -6461503320817105280L; + + /** + * Track requests to redirect multiple procs together by assigning a group id. 
+ */ + private final String groupId; + private final RedirectType type; + private final String destinationId; + private final String name; + private final long creationTime; + + public static final long EXPIRE_TIME = TimeUnit.MILLISECONDS.convert(24, TimeUnit.HOURS); + + public Redirect(String groupId, RedirectType type, String destinationId, String name, + long creationTime) { + this.groupId = groupId; + this.type = type; + this.destinationId = destinationId; + this.name = name; + this.creationTime = creationTime; + } + + public Redirect(RedirectType type, String destinationId, String name) { + this.groupId = SqlUtil.genKeyRandom(); + this.type = type; + this.destinationId = destinationId; + this.name = name; + this.creationTime = System.currentTimeMillis(); + } + + public Redirect(String groupId, JobInterface job) { + this.groupId = groupId; + this.type = RedirectType.JOB_REDIRECT; + this.destinationId = job.getJobId(); + this.name = job.getName(); + this.creationTime = System.currentTimeMillis(); + } + + public Redirect(JobInterface job) { + this.groupId = SqlUtil.genKeyRandom(); + this.type = RedirectType.JOB_REDIRECT; + this.destinationId = job.getJobId(); + this.name = job.getName(); + this.creationTime = System.currentTimeMillis(); + } + + public Redirect(GroupInterface group) { + this.groupId = SqlUtil.genKeyRandom(); + this.type = RedirectType.GROUP_REDIRECT; + this.destinationId = group.getGroupId(); + this.name = group.getName(); + this.creationTime = System.currentTimeMillis(); + } + + public String getGroupId() { + return groupId; + } + + public RedirectType getType() { + return type; + } + + public String getDestinationId() { + return destinationId; + } + + public String getDestinationName() { + return name; + } + + public boolean isExpired() { + return System.currentTimeMillis() - creationTime >= EXPIRE_TIME; + } + + public long getCreationTime() { + return creationTime; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java b/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java index b45af0838..26ba5c65b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
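// Illustrative sketch only (not part of the patch): constructing and checking the Redirect
// value object reformatted above. The JobInterface instance is assumed to come from a DAO.
import com.imageworks.spcue.JobInterface;
import com.imageworks.spcue.Redirect;

public class RedirectSketch {
    public static Redirect redirectToJob(JobInterface destinationJob) {
        // This constructor generates a random group id and stamps the creation time with "now".
        return new Redirect(destinationJob);
    }

    public static boolean stillValid(Redirect redirect) {
        // A redirect expires EXPIRE_TIME (24 hours) after its creation time.
        return !redirect.isExpired();
    }
}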
*/ - - package com.imageworks.spcue; /** @@ -24,38 +20,37 @@ */ public class ResourceUsage { - private final long coreTimeSeconds; - private final long gpuTimeSeconds; - private final long clockTimeSeconds; + private final long coreTimeSeconds; + private final long gpuTimeSeconds; + private final long clockTimeSeconds; - public ResourceUsage(long clockTime, int corePoints, int gpuPoints) { + public ResourceUsage(long clockTime, int corePoints, int gpuPoints) { - if (clockTime < 1) { - clockTime = 1; - } + if (clockTime < 1) { + clockTime = 1; + } - long coreTime = (long) (clockTime * (corePoints / 100f)); - if (coreTime < 1) { - coreTime = 1; - } + long coreTime = (long) (clockTime * (corePoints / 100f)); + if (coreTime < 1) { + coreTime = 1; + } - long gpuTime = clockTime * gpuPoints; + long gpuTime = clockTime * gpuPoints; - clockTimeSeconds = clockTime; - coreTimeSeconds = coreTime; - gpuTimeSeconds = gpuTime; - } + clockTimeSeconds = clockTime; + coreTimeSeconds = coreTime; + gpuTimeSeconds = gpuTime; + } - public long getCoreTimeSeconds() { - return coreTimeSeconds; - } + public long getCoreTimeSeconds() { + return coreTimeSeconds; + } - public long getGpuTimeSeconds() { - return gpuTimeSeconds; - } + public long getGpuTimeSeconds() { + return gpuTimeSeconds; + } - public long getClockTimeSeconds() { - return clockTimeSeconds; - } + public long getClockTimeSeconds() { + return clockTimeSeconds; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java index 72b134f7d..1dfea70e3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.util.LinkedHashSet; @@ -24,53 +20,50 @@ import com.imageworks.spcue.dispatcher.Dispatcher; public class ServiceEntity extends Entity { - /** - * Determines if the service is threadable or not. - */ - public boolean threadable = false; - - /** - * Determines the default minimum cores per frame. - */ - public int minCores = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; - - /** - * Determines the default minimum cores per frame. 0 indicates - * the feature is disabled. - */ - public int maxCores = 0; - - /** - * Determines the default minimum gpus per frame. 
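// Illustrative sketch only (not part of the patch): the arithmetic performed by the
// ResourceUsage constructor above, with concrete made-up numbers.
import com.imageworks.spcue.ResourceUsage;

public class ResourceUsageSketch {
    public static void main(String[] args) {
        // 600 seconds of wall clock on 250 core points (2.5 cores) and 1 GPU.
        ResourceUsage usage = new ResourceUsage(600L, 250, 1);
        System.out.println(usage.getClockTimeSeconds()); // 600
        System.out.println(usage.getCoreTimeSeconds());  // 1500 = 600 * (250 / 100)
        System.out.println(usage.getGpuTimeSeconds());   // 600  = 600 * 1
    }
}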
- */ - public int minGpus = 0; - - /** - * Determines the default minimum gpus per frame. 0 indicates - * the feature is disabled. - */ - public int maxGpus = 0; - - /** - * Determines the default minimum memory per frame. - */ - public long minMemory = Dispatcher.MEM_SERVICE_RESERVED_DEFAULT; - - /** - * Determines the default minimum gpu per frame. - */ - public long minGpuMemory = Dispatcher.MEM_SERVICE_GPU_RESERVED_DEFAULT; - - /** - * Determines the default tags. - */ - public LinkedHashSet tags = new LinkedHashSet(); - - public int timeout = 0; - - public int timeout_llu = 0; - - public long minMemoryIncrease = Dispatcher.MINIMUM_MEMORY_INCREASE; + /** + * Determines if the service is threadable or not. + */ + public boolean threadable = false; -} + /** + * Determines the default minimum cores per frame. + */ + public int minCores = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + + /** + * Determines the default minimum cores per frame. 0 indicates the feature is disabled. + */ + public int maxCores = 0; + + /** + * Determines the default minimum gpus per frame. + */ + public int minGpus = 0; + + /** + * Determines the default minimum gpus per frame. 0 indicates the feature is disabled. + */ + public int maxGpus = 0; + + /** + * Determines the default minimum memory per frame. + */ + public long minMemory = Dispatcher.MEM_SERVICE_RESERVED_DEFAULT; + /** + * Determines the default minimum gpu per frame. + */ + public long minGpuMemory = Dispatcher.MEM_SERVICE_GPU_RESERVED_DEFAULT; + + /** + * Determines the default tags. + */ + public LinkedHashSet tags = new LinkedHashSet(); + + public int timeout = 0; + + public int timeout_llu = 0; + + public long minMemoryIncrease = Dispatcher.MINIMUM_MEMORY_INCREASE; + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java index 21f6b40a2..2e7cca2b9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java @@ -2,29 +2,24 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class ServiceOverrideEntity extends ServiceEntity { - /** - * The show that wants to override the service. - */ - public String showId; + /** + * The show that wants to override the service. 
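// Illustrative sketch only (not part of the patch): populating the ServiceEntity defaults
// documented above for a hypothetical service definition. The concrete values are examples,
// except where a Dispatcher constant from the code above is reused.
import com.imageworks.spcue.ServiceEntity;
import com.imageworks.spcue.dispatcher.Dispatcher;

public class ServiceEntitySketch {
    public static ServiceEntity exampleService() {
        ServiceEntity service = new ServiceEntity();
        service.threadable = true;                                   // frames may span multiple cores
        service.minCores = Dispatcher.CORE_POINTS_RESERVED_DEFAULT;  // default per-frame reservation
        service.maxCores = 800;                                      // example cap; 0 disables the cap
        service.minMemory = Dispatcher.MEM_SERVICE_RESERVED_DEFAULT;
        service.tags.add("general");                                 // example allocation tag
        service.timeout = 0;                                         // no frame timeout
        return service;
    }
}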
+ */ + public String showId; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java index 1d2f675e1..a7831deaf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java @@ -2,35 +2,30 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class ShowEntity extends Entity implements ShowInterface { - public boolean active; - public boolean paused; - public int defaultMinCores; - public int defaultMaxCores; - public int defaultMinGpus; - public int defaultMaxGpus; - public String[] commentMail; - - public String getShowId() { - return id; - } + public boolean active; + public boolean paused; + public int defaultMinCores; + public int defaultMaxCores; + public int defaultMinGpus; + public int defaultMaxGpus; + public String[] commentMail; + + public String getShowId() { + return id; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java b/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java index e3f20defa..a43aaf6af 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java @@ -2,25 +2,20 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface ShowInterface extends EntityInterface { - public String getShowId(); + public String getShowId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java b/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java index 83798f079..9c8de3c55 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.util.HashSet; @@ -29,92 +25,89 @@ public class SortableShow implements Comparable { - private static final Logger logger = LogManager.getLogger(SortableShow.class); - - private String show; - private float tier; - - private Map failed = new ConcurrentHashMap(); - private Set failedAllocs = new HashSet(); - - public SortableShow(String show, float value) { - this.show = show; - this.tier = value; - } - - public String getShowId() { - return show; - } - - public float getValue() { - return tier; - } - - public boolean isSkipped(String tags, long cores, long memory) { - try { - if (failed.containsKey(tags)) { - long [] mark = failed.get(tags); - if (cores < mark[0]) { - logger.info("skipped due to not enough cores " + cores + " < " + mark[0]); - return true; - } - else if (memory < mark[1]) { - logger.info("skipped due to not enough memory " + memory + " < " + mark[1]); - return true; - } - } - return false; - } catch (Exception e ){ - logger.info("exception checking skipped: " + e); - return false; + private static final Logger logger = LogManager.getLogger(SortableShow.class); + + private String show; + private float tier; + + private Map failed = new ConcurrentHashMap(); + private Set failedAllocs = new HashSet(); + + public SortableShow(String show, float value) { + this.show = show; + this.tier = value; + } + + public String getShowId() { + return show; + } + + public float getValue() { + return tier; + } + + public boolean isSkipped(String tags, long cores, long memory) { + try { + if (failed.containsKey(tags)) { + long[] mark = failed.get(tags); + if (cores < mark[0]) { + logger.info("skipped due to not enough cores " + cores + " < " + mark[0]); + return true; + } else if (memory < mark[1]) { + logger.info("skipped due to not enough 
memory " + memory + " < " + mark[1]); + return true; } + } + return false; + } catch (Exception e) { + logger.info("exception checking skipped: " + e); + return false; } + } - public boolean isSkipped(AllocationInterface a) { - if (failedAllocs.contains(a)) { - return true; - } - return false; + public boolean isSkipped(AllocationInterface a) { + if (failedAllocs.contains(a)) { + return true; } + return false; + } - public void skip(String tags, long cores, long memory) { - if (tags != null) { - failed.put(tags, new long[] { cores, memory}); - } + public void skip(String tags, long cores, long memory) { + if (tags != null) { + failed.put(tags, new long[] {cores, memory}); } - - /** - * Adds an allocation that should not be - * booked on this show. - * - * @param Allocation - */ - public void skip(AllocationInterface a) { - synchronized (failedAllocs) { - failedAllocs.add(a); - } + } + + /** + * Adds an allocation that should not be booked on this show. + * + * @param Allocation + */ + public void skip(AllocationInterface a) { + synchronized (failedAllocs) { + failedAllocs.add(a); } - - @Override - public int compareTo(SortableShow o) { - return (int) ((this.tier * 100) - (o.getValue() * 100)); + } + + @Override + public int compareTo(SortableShow o) { + return (int) ((this.tier * 100) - (o.getValue() * 100)); + } + + @Override + public int hashCode() { + return show.hashCode(); + }; + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; } - - @Override - public int hashCode() { - return show.hashCode(); - }; - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (this.getClass() != other.getClass()) { - return false; - } - SortableShow that = (SortableShow) other; - return that.getShowId().equals(this.getShowId()); + if (this.getClass() != other.getClass()) { + return false; } + SortableShow that = (SortableShow) other; + return that.getShowId().equals(this.getShowId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/Source.java b/cuebot/src/main/java/com/imageworks/spcue/Source.java index 8f6d78ee3..e1188f219 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Source.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Source.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; /** @@ -24,36 +20,32 @@ */ public class Source { - public String source = "unknown"; - public String username = ""; - public String pid = ""; - public String host_kill = ""; - public String reason = ""; - - public Source() {} - - public Source(String source) { - this.source = source; - } - - public Source(String source, String username, String pid, String host_kill, String reason) { - this.source = source; - this.username = username; - this.pid = pid; - this.host_kill = host_kill; - this.reason = reason; - } - - public String getReason() { - return this.reason; - } - - public String toString() { - return "User: " + this.username + - ", Pid: " + this.pid + - ", Hostname: " + this.host_kill + - ", Reason: " + this.reason + - "\n" + this.source; - } + public String source = "unknown"; + public String username = ""; + public String pid = ""; + public String host_kill = ""; + public String reason = ""; + + public Source() {} + + public Source(String source) { + this.source = source; + } + + public Source(String source, String username, String pid, String host_kill, String reason) { + this.source = source; + this.username = username; + this.pid = pid; + this.host_kill = host_kill; + this.reason = reason; + } + + public String getReason() { + return this.reason; + } + + public String toString() { + return "User: " + this.username + ", Pid: " + this.pid + ", Hostname: " + this.host_kill + + ", Reason: " + this.reason + "\n" + this.source; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java b/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java index 7b42994c5..c3a6be1d6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java @@ -2,45 +2,40 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; @SuppressWarnings("serial") public class SpcueRuntimeException extends RuntimeException { - public SpcueRuntimeException() { - super(); - // TODO Auto-generated constructor stub - } + public SpcueRuntimeException() { + super(); + // TODO Auto-generated constructor stub + } - public SpcueRuntimeException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public SpcueRuntimeException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public SpcueRuntimeException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public SpcueRuntimeException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public SpcueRuntimeException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public SpcueRuntimeException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java b/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java index 2d65d9efc..2fd234501 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import org.springframework.core.NestedRuntimeException; @@ -24,15 +20,14 @@ @SuppressWarnings("serial") public class SpecBuilderException extends NestedRuntimeException { - public SpecBuilderException(String arg0) { - super(arg0); - // TODO Auto-generated constructor stub - } + public SpecBuilderException(String arg0) { + super(arg0); + // TODO Auto-generated constructor stub + } - public SpecBuilderException(String arg0, Throwable arg1) { - super(arg0, arg1); - // TODO Auto-generated constructor stub - } + public SpecBuilderException(String arg0, Throwable arg1) { + super(arg0, arg1); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java b/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java index caaa53cf6..3d2e819a6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java +++ b/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java @@ -2,43 +2,38 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public final class StrandedCores { - /** - * The maximum time this object should be valid. - */ - private static final long MAX_AGE_MILLIS = 5000l; + /** + * The maximum time this object should be valid. 
+ */ + private static final long MAX_AGE_MILLIS = 5000l; - private final int cores; - private final long expireTime = System.currentTimeMillis() + MAX_AGE_MILLIS; + private final int cores; + private final long expireTime = System.currentTimeMillis() + MAX_AGE_MILLIS; - public StrandedCores(int cores) { - this.cores = cores; - } + public StrandedCores(int cores) { + this.cores = cores; + } - public int getCores() { - return this.cores; - } + public int getCores() { + return this.cores; + } - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; - } + public boolean isExpired() { + return System.currentTimeMillis() > expireTime; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java index 339f0ca91..f3b8d33ea 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java @@ -2,47 +2,42 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
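// Illustrative sketch only (not part of the patch): the 5-second validity window of the
// StrandedCores record above.
import com.imageworks.spcue.StrandedCores;

public class StrandedCoresSketch {
    public static void main(String[] args) throws InterruptedException {
        StrandedCores stranded = new StrandedCores(200); // 200 core points left stranded on a host
        System.out.println(stranded.isExpired());        // false: just created
        Thread.sleep(5500);                              // wait past MAX_AGE_MILLIS (5000 ms)
        System.out.println(stranded.isExpired());        // true: the record is no longer valid
    }
}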
*/ - - package com.imageworks.spcue; public class SubscriptionEntity extends Entity implements SubscriptionInterface { - public String showId; - public String allocationId; - public String facilityId; + public String showId; + public String allocationId; + public String facilityId; - public int size; - public int burst; + public int size; + public int burst; - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } - public String getAllocationId() { - return allocationId; - } + public String getAllocationId() { + return allocationId; + } - public String getSubscriptionId() { - return id; - } + public String getSubscriptionId() { + return id; + } - @Override - public String getFacilityId() { - return facilityId; - } + @Override + public String getFacilityId() { + return facilityId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java index 874ed7415..8b0bf2604 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface SubscriptionInterface extends ShowInterface, AllocationInterface { - public String getSubscriptionId(); + public String getSubscriptionId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java b/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java index 13a4fa020..0ae853737 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java @@ -1,65 +1,59 @@ - /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class TaskEntity extends Entity implements TaskInterface { - public int minCoreUnits = 100; - public boolean isDefaultTask = false; - - public String shot; - public String showId; - public String deptId; - public String pointId; - - public TaskEntity() { } - - public TaskEntity(PointInterface c, String shot, int minCoreUnits) { - this.pointId = c.getPointId(); - this.shot = shot; - this.minCoreUnits = minCoreUnits; - } - - public TaskEntity(PointInterface c, String shot) { - this.pointId = c.getPointId(); - this.shot = shot; - } - - @Override - public String getDepartmentId() { - return deptId; - } - - @Override - public String getShowId() { - return showId; - } - - @Override - public String getTaskId() { - return id; - } - - @Override - public String getPointId() { - return pointId; - } + public int minCoreUnits = 100; + public boolean isDefaultTask = false; + + public String shot; + public String showId; + public String deptId; + public String pointId; + + public TaskEntity() {} + + public TaskEntity(PointInterface c, String shot, int minCoreUnits) { + this.pointId = c.getPointId(); + this.shot = shot; + this.minCoreUnits = minCoreUnits; + } + + public TaskEntity(PointInterface c, String shot) { + this.pointId = c.getPointId(); + this.shot = shot; + } + + @Override + public String getDepartmentId() { + return deptId; + } + + @Override + public String getShowId() { + return showId; + } + + @Override + public String getTaskId() { + return id; + } + + @Override + public String getPointId() { + return pointId; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java b/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java index d06fa0bfb..fc13f452f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java @@ -2,29 +2,24 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public interface TaskInterface extends EntityInterface, PointInterface { - /** - * Returns the unique Id of the task - * - * @return - */ - public String getTaskId(); + /** + * Returns the unique Id of the task + * + * @return + */ + public String getTaskId(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java b/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java index d1a2fbd2f..fd870aa75 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java @@ -2,43 +2,38 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; public class ThreadStats { - private int threads; - private int avgFrameTime; + private int threads; + private int avgFrameTime; - public int getThreads() { - return threads; - } + public int getThreads() { + return threads; + } - public void setThreads(int threads) { - this.threads = threads; - } + public void setThreads(int threads) { + this.threads = threads; + } - public int getAvgFrameTime() { - return avgFrameTime; - } + public int getAvgFrameTime() { + return avgFrameTime; + } - public void setAvgFrameTime(int avgFrameTime) { - this.avgFrameTime = avgFrameTime; - } + public void setAvgFrameTime(int avgFrameTime) { + this.avgFrameTime = avgFrameTime; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java b/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java index 72dcd306b..cc664a322 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue; import java.sql.Date; @@ -26,15 +22,14 @@ */ public class TrackitTaskDetail { - public String show; - public String shot; - public String task; - public String status; - public Date startDate; - public Date endDate; - public String cgSup; - public int frameCount; - public int points; - public int weeks; + public String show; + public String shot; + public String task; + public String status; + public Date startDate; + public Date endDate; + public String cgSup; + public int frameCount; + public int points; + public int weeks; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java b/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java index 2954fe662..c88b1b933 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue; import com.imageworks.spcue.dispatcher.Dispatcher; @@ -27,265 +23,252 @@ public class VirtualProc extends FrameEntity implements ProcInterface { - private static final Logger logger = LogManager.getLogger(VirtualProc.class); - - public String hostId; - public String allocationId; - public String frameId; - public String hostName; - public String os; - public byte[] childProcesses; - - public boolean canHandleNegativeCoresRequest; - public int coresReserved; - public long memoryReserved; - public long memoryUsed; - public long memoryMax; - public long virtualMemoryUsed; - public long virtualMemoryMax; - - public int gpusReserved; - public long gpuMemoryReserved; - public long gpuMemoryUsed; - public long gpuMemoryMax; - - public boolean unbooked; - public boolean usageRecorded = false; - public boolean isLocalDispatch = false; - - public String getProcId() { - return id; - } - - public String getHostId() { - return hostId; - } - - public String getAllocationId() { - return allocationId; - } - - public String getFrameId() { - return frameId; - } - - public String getName() { - return hostName; - } - - /** - * Build and return a proc in either fast or efficient mode. - * - * Efficient mode tries to assign one core per frame, but may upgrade the - * number of cores based on memory usage. - * - * Fast mode books all the idle cores on the the host at one time. + private static final Logger logger = LogManager.getLogger(VirtualProc.class); + + public String hostId; + public String allocationId; + public String frameId; + public String hostName; + public String os; + public byte[] childProcesses; + + public boolean canHandleNegativeCoresRequest; + public int coresReserved; + public long memoryReserved; + public long memoryUsed; + public long memoryMax; + public long virtualMemoryUsed; + public long virtualMemoryMax; + + public int gpusReserved; + public long gpuMemoryReserved; + public long gpuMemoryUsed; + public long gpuMemoryMax; + + public boolean unbooked; + public boolean usageRecorded = false; + public boolean isLocalDispatch = false; + + public String getProcId() { + return id; + } + + public String getHostId() { + return hostId; + } + + public String getAllocationId() { + return allocationId; + } + + public String getFrameId() { + return frameId; + } + + public String getName() { + return hostName; + } + + /** + * Build and return a proc in either fast or efficient mode. + * + * Efficient mode tries to assign one core per frame, but may upgrade the number of cores based on + * memory usage. + * + * Fast mode books all the idle cores on the the host at one time. + * + * @param host + * @param frame + * @return + */ + public static final VirtualProc build(DispatchHost host, DispatchFrame frame, + String... 
selfishServices) { + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.getAllocationId(); + proc.hostId = host.getHostId(); + proc.frameId = null; + proc.layerId = frame.getLayerId(); + proc.jobId = frame.getJobId(); + proc.showId = frame.getShowId(); + proc.facilityId = frame.getFacilityId(); + proc.os = frame.os; + + proc.hostName = host.getName(); + proc.unbooked = false; + proc.isLocalDispatch = host.isLocalDispatch; + + proc.coresReserved = frame.minCores; + proc.memoryReserved = frame.getMinMemory(); + proc.gpusReserved = frame.minGpus; + proc.gpuMemoryReserved = frame.minGpuMemory; + + /* + * Frames that are announcing cores less than 100 are not multi-threaded so there is no reason + * for the frame to span more than a single core. * - * @param host - * @param frame - * @return + * If we are in "fast mode", we just book all the cores If the host is nimby, desktops are + * automatically fast mode. */ - public static final VirtualProc build(DispatchHost host, - DispatchFrame frame, - String... selfishServices) { - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.getAllocationId(); - proc.hostId = host.getHostId(); - proc.frameId = null; - proc.layerId = frame.getLayerId(); - proc.jobId = frame.getJobId(); - proc.showId = frame.getShowId(); - proc.facilityId = frame.getFacilityId(); - proc.os = frame.os; - - proc.hostName = host.getName(); - proc.unbooked = false; - proc.isLocalDispatch = host.isLocalDispatch; - - proc.coresReserved = frame.minCores; - proc.memoryReserved = frame.getMinMemory(); - proc.gpusReserved = frame.minGpus; - proc.gpuMemoryReserved = frame.minGpuMemory; - - /* - * Frames that are announcing cores less than 100 are not multi-threaded - * so there is no reason for the frame to span more than a single core. - * - * If we are in "fast mode", we just book all the cores If the host is - * nimby, desktops are automatically fast mode. 
- */ - - if (host.strandedCores > 0) { - proc.coresReserved = proc.coresReserved + host.strandedCores; - } - - proc.canHandleNegativeCoresRequest = host.canHandleNegativeCoresRequest(proc.coresReserved); - if (proc.coresReserved == 0) { - logger.debug("Reserving all cores"); - proc.coresReserved = host.cores; - } - else if (proc.coresReserved < 0) { - logger.debug("Reserving all cores minus " + proc.coresReserved); - proc.coresReserved = host.cores + proc.coresReserved; - } - else if (proc.coresReserved >= 100) { - - int originalCores = proc.coresReserved; - - /* - * wholeCores could be 0 if we have a fraction of a core, we can - * just throw a re - */ - int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); - if (wholeCores == 0) { - throw new EntityException( - "The host had only a fraction of a core remaining " - + "but the frame required " + frame.minCores); - } + if (host.strandedCores > 0) { + proc.coresReserved = proc.coresReserved + host.strandedCores; + } - // if (host.threadMode == ThreadMode.Variable.value() && - // CueUtil.isDayTime()) { - if (host.threadMode == ThreadMode.ALL_VALUE) { - proc.coresReserved = wholeCores * 100; + proc.canHandleNegativeCoresRequest = host.canHandleNegativeCoresRequest(proc.coresReserved); + + if (proc.coresReserved == 0) { + logger.debug("Reserving all cores"); + proc.coresReserved = host.cores; + } else if (proc.coresReserved < 0) { + logger.debug("Reserving all cores minus " + proc.coresReserved); + proc.coresReserved = host.cores + proc.coresReserved; + } else if (proc.coresReserved >= 100) { + + int originalCores = proc.coresReserved; + + /* + * wholeCores could be 0 if we have a fraction of a core, we can just throw a re + */ + int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); + if (wholeCores == 0) { + throw new EntityException("The host had only a fraction of a core remaining " + + "but the frame required " + frame.minCores); + } + + // if (host.threadMode == ThreadMode.Variable.value() && + // CueUtil.isDayTime()) { + if (host.threadMode == ThreadMode.ALL_VALUE) { + proc.coresReserved = wholeCores * 100; + } else { + if (frame.threadable) { + if (selfishServices != null && frame.services != null + && containsSelfishService(frame.services.split(","), selfishServices)) { + proc.coresReserved = wholeCores * 100; + } else { + if (host.idleMemory - frame.getMinMemory() <= Dispatcher.MEM_STRANDED_THRESHHOLD) { + proc.coresReserved = wholeCores * 100; } else { - if (frame.threadable) { - if (selfishServices != null && - frame.services != null && - containsSelfishService(frame.services.split(","), selfishServices)){ - proc.coresReserved = wholeCores * 100; - } - else { - if (host.idleMemory - frame.getMinMemory() - <= Dispatcher.MEM_STRANDED_THRESHHOLD) { - proc.coresReserved = wholeCores * 100; - } else { - proc.coresReserved = getCoreSpan(host, frame.getMinMemory()); - } - } - if (host.threadMode == ThreadMode.VARIABLE_VALUE - && proc.coresReserved <= 200) { - proc.coresReserved = 200; - if (proc.coresReserved > host.idleCores) { - // Do not allow threadable frame running on 1 core. - throw new JobDispatchException( - "Do not allow threadable frame running one core on a ThreadMode.Variable host."); - } - } - } - } - - /* - * Sanity checks to ensure coreUnits are not to high or to low. - */ - if (proc.coresReserved < 100) { - proc.coresReserved = 100; - } - - /* - * If the core value is changed it can never fall below the - * original. 
- */ - if (proc.coresReserved < originalCores) { - proc.coresReserved = originalCores; + proc.coresReserved = getCoreSpan(host, frame.getMinMemory()); } - - /* - * Check to ensure we haven't exceeded max cores. - */ - if (frame.maxCores > 0 && proc.coresReserved >= frame.maxCores) { - proc.coresReserved = frame.maxCores; - } - + } + if (host.threadMode == ThreadMode.VARIABLE_VALUE && proc.coresReserved <= 200) { + proc.coresReserved = 200; if (proc.coresReserved > host.idleCores) { - if (host.threadMode == ThreadMode.VARIABLE_VALUE - && frame.threadable && wholeCores == 1) { - throw new JobDispatchException( - "Do not allow threadable frame running one core on a ThreadMode.Variable host."); - } - proc.coresReserved = wholeCores * 100; + // Do not allow threadable frame running on 1 core. + throw new JobDispatchException( + "Do not allow threadable frame running one core on a ThreadMode.Variable host."); } + } } - - /* - * Don't thread non-threadable layers, no matter what people put for the - * number of cores. - */ - if (!frame.threadable && proc.coresReserved > 100) { - proc.coresReserved = 100; + } + + /* + * Sanity checks to ensure coreUnits are not to high or to low. + */ + if (proc.coresReserved < 100) { + proc.coresReserved = 100; + } + + /* + * If the core value is changed it can never fall below the original. + */ + if (proc.coresReserved < originalCores) { + proc.coresReserved = originalCores; + } + + /* + * Check to ensure we haven't exceeded max cores. + */ + if (frame.maxCores > 0 && proc.coresReserved >= frame.maxCores) { + proc.coresReserved = frame.maxCores; + } + + if (proc.coresReserved > host.idleCores) { + if (host.threadMode == ThreadMode.VARIABLE_VALUE && frame.threadable && wholeCores == 1) { + throw new JobDispatchException( + "Do not allow threadable frame running one core on a ThreadMode.Variable host."); } - - return proc; + proc.coresReserved = wholeCores * 100; + } } - private static final boolean containsSelfishService(String[] frameServices, String[] selfishServices) { - for (String frameService: frameServices){ - for (String selfishService: selfishServices) { - if (frameService.equals(selfishService)) { - return true; - } - } - } - return false; + /* + * Don't thread non-threadable layers, no matter what people put for the number of cores. 
+ */ + if (!frame.threadable && proc.coresReserved > 100) { + proc.coresReserved = 100; } - public static final VirtualProc build(DispatchHost host, - DispatchFrame frame, LocalHostAssignment lja) { - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.getAllocationId(); - proc.hostId = host.getHostId(); - proc.frameId = null; - proc.layerId = frame.getLayerId(); - proc.jobId = frame.getJobId(); - proc.showId = frame.getShowId(); - proc.facilityId = frame.getFacilityId(); - proc.os = frame.os; - - proc.hostName = host.getName(); - proc.unbooked = false; - proc.isLocalDispatch = host.isLocalDispatch; - - proc.coresReserved = lja.getThreads() * 100; - proc.memoryReserved = frame.getMinMemory(); - proc.gpusReserved = frame.minGpus; - proc.gpuMemoryReserved = frame.minGpuMemory; - - int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); - if (wholeCores == 0) { - throw new EntityException( - "The host had only a fraction of a core remaining " - + "but the frame required " + frame.minCores); - } + return proc; + } - if (proc.coresReserved > host.idleCores) { - proc.coresReserved = wholeCores * 100; + private static final boolean containsSelfishService(String[] frameServices, + String[] selfishServices) { + for (String frameService : frameServices) { + for (String selfishService : selfishServices) { + if (frameService.equals(selfishService)) { + return true; } + } + } + return false; + } + + public static final VirtualProc build(DispatchHost host, DispatchFrame frame, + LocalHostAssignment lja) { + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.getAllocationId(); + proc.hostId = host.getHostId(); + proc.frameId = null; + proc.layerId = frame.getLayerId(); + proc.jobId = frame.getJobId(); + proc.showId = frame.getShowId(); + proc.facilityId = frame.getFacilityId(); + proc.os = frame.os; + + proc.hostName = host.getName(); + proc.unbooked = false; + proc.isLocalDispatch = host.isLocalDispatch; + + proc.coresReserved = lja.getThreads() * 100; + proc.memoryReserved = frame.getMinMemory(); + proc.gpusReserved = frame.minGpus; + proc.gpuMemoryReserved = frame.minGpuMemory; + + int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); + if (wholeCores == 0) { + throw new EntityException("The host had only a fraction of a core remaining " + + "but the frame required " + frame.minCores); + } - return proc; - + if (proc.coresReserved > host.idleCores) { + proc.coresReserved = wholeCores * 100; } - /** - * Allocates additional cores when the frame is using more 50% more than a - * single cores worth of memory. - * - * @param host - * @param minMemory - * @return - */ - public static int getCoreSpan(DispatchHost host, long minMemory) { - int totalCores = (int) (Math.floor(host.cores / 100.0)); - int idleCores = (int) (Math.floor(host.idleCores / 100.0)); - if (idleCores < 1) { - return 100; - } + return proc; + + } + + /** + * Allocates additional cores when the frame is using more 50% more than a single cores worth of + * memory. 
+ * + * @param host + * @param minMemory + * @return + */ + public static int getCoreSpan(DispatchHost host, long minMemory) { + int totalCores = (int) (Math.floor(host.cores / 100.0)); + int idleCores = (int) (Math.floor(host.idleCores / 100.0)); + if (idleCores < 1) { + return 100; + } - long memPerCore = host.idleMemory / totalCores; - double procs = minMemory / (double) memPerCore; - int reserveCores = (int) (Math.round(procs)) * 100; + long memPerCore = host.idleMemory / totalCores; + double procs = minMemory / (double) memPerCore; + int reserveCores = (int) (Math.round(procs)) * 100; - return reserveCores; - } + return reserveCores; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java b/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java index 7ed1668ce..d6c4b45fe 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
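For reviewers tracing the reformatted VirtualProc.getCoreSpan() above, a small standalone walk-through of its arithmetic may help. Everything in the sketch below is hypothetical: the class name, the host figures, and the memory unit (taken here as KB) are illustrative stand-ins rather than values from this patch; core counts follow the convention used in the surrounding code of 100 units per whole core.

  // Hypothetical walk-through of the core-span arithmetic shown above.
  // The numbers and the KB memory unit are illustrative assumptions only.
  public class CoreSpanExample {
    public static void main(String[] args) {
      long hostCores = 800;                    // 8 cores, expressed in 100-unit core units
      long hostIdleCores = 400;                // 4 cores idle
      long hostIdleMemory = 32L * 1024 * 1024; // 32 GB of idle memory, in KB
      long frameMinMemory = 12L * 1024 * 1024; // the frame asks for 12 GB

      int totalCores = (int) Math.floor(hostCores / 100.0);    // 8
      int idleCores = (int) Math.floor(hostIdleCores / 100.0); // 4, so no early return of 100
      long memPerCore = hostIdleMemory / totalCores;           // one core's share: 4 GB
      double procs = frameMinMemory / (double) memPerCore;     // 3.0 cores worth of memory
      int reserveCores = (int) Math.round(procs) * 100;        // books 300 core units

      System.out.println("reserveCores = " + reserveCores);
    }
  }

A frame needing three cores worth of idle memory is therefore booked onto three whole cores rather than one, which is the "upgrade the number of cores based on memory usage" behavior described in the build() javadoc.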
*/ - - package com.imageworks.spcue.config; import com.imageworks.spcue.servlet.JobLaunchServlet; @@ -38,56 +34,58 @@ @Configuration @ImportResource({"classpath:conf/spring/applicationContext-dbEngine.xml", - "classpath:conf/spring/applicationContext-grpc.xml", - "classpath:conf/spring/applicationContext-grpcServer.xml", - "classpath:conf/spring/applicationContext-service.xml", - "classpath:conf/spring/applicationContext-jms.xml", - "classpath:conf/spring/applicationContext-criteria.xml"}) + "classpath:conf/spring/applicationContext-grpc.xml", + "classpath:conf/spring/applicationContext-grpcServer.xml", + "classpath:conf/spring/applicationContext-service.xml", + "classpath:conf/spring/applicationContext-jms.xml", + "classpath:conf/spring/applicationContext-criteria.xml"}) @EnableConfigurationProperties @PropertySource({"classpath:opencue.properties"}) public class AppConfig { - @Configuration - @Conditional(PostgresDatabaseCondition.class) - @ImportResource({"classpath:conf/spring/applicationContext-dao-postgres.xml"}) - static class PostgresEngineConfig {} + @Configuration + @Conditional(PostgresDatabaseCondition.class) + @ImportResource({"classpath:conf/spring/applicationContext-dao-postgres.xml"}) + static class PostgresEngineConfig { + } - @Bean - @Primary - @ConfigurationProperties(prefix="datasource.cue-data-source") - public DataSource cueDataSource() { - return DataSourceBuilder.create().build(); - } + @Bean + @Primary + @ConfigurationProperties(prefix = "datasource.cue-data-source") + public DataSource cueDataSource() { + return DataSourceBuilder.create().build(); + } - @Bean - public ServletRegistrationBean jobLaunchServlet() { - ServletRegistrationBean b = new ServletRegistrationBean<>(); - b.addUrlMappings("/launch"); - b.addInitParameter("contextConfigLocation", "classpath:conf/spring/jobLaunchServlet-servlet.xml"); - b.setServlet(new JobLaunchServlet()); - return b; - } + @Bean + public ServletRegistrationBean jobLaunchServlet() { + ServletRegistrationBean b = new ServletRegistrationBean<>(); + b.addUrlMappings("/launch"); + b.addInitParameter("contextConfigLocation", + "classpath:conf/spring/jobLaunchServlet-servlet.xml"); + b.setServlet(new JobLaunchServlet()); + return b; + } - @Bean - public ServletRegistrationBean healthCheckServlet() { - ServletRegistrationBean b = new ServletRegistrationBean<>(); - b.addUrlMappings("/health"); - b.addInitParameter("contextConfigLocation", "classpath:conf/spring/healthCheckServlet-servlet.xml"); - b.setServlet(new HealthCheckServlet()); - return b; - } + @Bean + public ServletRegistrationBean healthCheckServlet() { + ServletRegistrationBean b = new ServletRegistrationBean<>(); + b.addUrlMappings("/health"); + b.addInitParameter("contextConfigLocation", + "classpath:conf/spring/healthCheckServlet-servlet.xml"); + b.setServlet(new HealthCheckServlet()); + return b; + } - /** - * Registers the Prometheus MetricsServlet to expose metrics at /metrics endpoint - * - * @return A ServletRegistrationBean for MetricsServlet - */ - @Bean - public ServletRegistrationBean prometheusServer() { - ServletRegistrationBean b = new ServletRegistrationBean<>(); - b.addUrlMappings("/metrics"); - b.setServlet(new MetricsServlet()); - return b; - } + /** + * Registers the Prometheus MetricsServlet to expose metrics at /metrics endpoint + * + * @return A ServletRegistrationBean for MetricsServlet + */ + @Bean + public ServletRegistrationBean prometheusServer() { + ServletRegistrationBean b = new ServletRegistrationBean<>(); + b.addUrlMappings("/metrics"); + 
b.setServlet(new MetricsServlet()); + return b; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java b/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java index 0ae36a296..98fd478ed 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java @@ -1,9 +1,9 @@ package com.imageworks.spcue.config; public enum DatabaseEngine { - POSTGRES; + POSTGRES; - public static DatabaseEngine fromEnv() { - return POSTGRES; - } + public static DatabaseEngine fromEnv() { + return POSTGRES; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java b/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java index f80ebfb03..aae40a11e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java @@ -6,14 +6,14 @@ public class PostgresDatabaseCondition implements Condition { - @Override - public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) { - String dbEngine = System.getenv("CUEBOT_DB_ENGINE"); - if (dbEngine == null) { - return true; - } - DatabaseEngine selectedDatabaseEngine = DatabaseEngine.valueOf(dbEngine.toUpperCase()); - return selectedDatabaseEngine.equals(DatabaseEngine.POSTGRES); + @Override + public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) { + String dbEngine = System.getenv("CUEBOT_DB_ENGINE"); + if (dbEngine == null) { + return true; } + DatabaseEngine selectedDatabaseEngine = DatabaseEngine.valueOf(dbEngine.toUpperCase()); + return selectedDatabaseEngine.equals(DatabaseEngine.POSTGRES); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java index 2d8d3ccc1..507fa19a8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
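The PostgresDatabaseCondition change above keeps the existing selection rule intact: an unset CUEBOT_DB_ENGINE defaults to Postgres, and any other value is resolved through DatabaseEngine.valueOf(). A minimal restatement of that rule follows; the class and helper names are illustrative only, the rule itself is the one shown in the reformatted code.

  // Illustrative restatement of the engine-selection rule; class and method
  // names here are hypothetical, only the rule comes from the code above.
  public class DbEngineSelectionSketch {
    enum DatabaseEngine { POSTGRES }

    static boolean postgresSelected(String cuebotDbEngine) {
      if (cuebotDbEngine == null) {
        return true; // an unset CUEBOT_DB_ENGINE falls back to Postgres
      }
      return DatabaseEngine.valueOf(cuebotDbEngine.toUpperCase()) == DatabaseEngine.POSTGRES;
    }

    public static void main(String[] args) {
      System.out.println(postgresSelected(null));       // true
      System.out.println(postgresSelected("postgres")); // true
      // Any other value throws IllegalArgumentException from valueOf(),
      // since POSTGRES is currently the only engine constant.
    }
  }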
*/ - - package com.imageworks.spcue.dao; import java.util.List; @@ -27,13 +23,15 @@ public interface ActionDao { - void createAction(ActionEntity action); - void deleteAction(ActionInterface action); + void createAction(ActionEntity action); - ActionEntity getAction(String id); - ActionEntity getAction(ActionInterface action); - void updateAction(ActionEntity action); + void deleteAction(ActionInterface action); - List getActions(FilterInterface filter); -} + ActionEntity getAction(String id); + ActionEntity getAction(ActionInterface action); + + void updateAction(ActionEntity action); + + List getActions(FilterInterface filter); +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java index b5a83745c..2b5a6846a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.AllocationEntity; @@ -30,84 +26,81 @@ */ public interface AllocationDao { - /** - * returns an AllocationEntity from its unique ID - * - * @param id - * @return AllocationEntity - */ - AllocationEntity getAllocationEntity(String id); - - /** - * Return an AllocationEntity for the given facility and unique allocation - * name. - * - * @param name - * @return AllocationEntity - */ - AllocationEntity findAllocationEntity(String facility, String name); - - /** - * Return an AllocationEntity from its fully qualified name which should be - * formatted as facility.name. - * - * @param name - * @return - */ - AllocationEntity findAllocationEntity(String name); - - /** - * Creates a new allocation - * - * @param detail - */ - void insertAllocation(FacilityInterface facility, AllocationEntity detail); - - /** - * Deletes an allocation - * - * @param alloc - */ - void deleteAllocation(AllocationInterface alloc); - - /** - * Updates the name of the allocation. This method also updates all child - * host allocation tags so you'll need to run - * allocDao.recalculateTags(alloc) - * - * @param alloc - * @param name - */ - void updateAllocationName(AllocationInterface alloc, String name); - - /** - * Updates the allocation tag. All hosts in the allocation are retagged. 
- * - * @param a - * @param tag - */ - void updateAllocationTag(AllocationInterface a, String tag); - - /** - * Sets the default allocation, AKA where procs go first. - * - * @param a - */ - void setDefaultAllocation(AllocationInterface a); - - /** - * Returns the current default allocation. - * - * @return - */ - AllocationEntity getDefaultAllocationEntity(); - - /** - * Set the allocation as billable or not billble. - * - * @param alloc - * @param value - */ - void updateAllocationBillable(AllocationInterface alloc, boolean value); + /** + * returns an AllocationEntity from its unique ID + * + * @param id + * @return AllocationEntity + */ + AllocationEntity getAllocationEntity(String id); + + /** + * Return an AllocationEntity for the given facility and unique allocation name. + * + * @param name + * @return AllocationEntity + */ + AllocationEntity findAllocationEntity(String facility, String name); + + /** + * Return an AllocationEntity from its fully qualified name which should be formatted as + * facility.name. + * + * @param name + * @return + */ + AllocationEntity findAllocationEntity(String name); + + /** + * Creates a new allocation + * + * @param detail + */ + void insertAllocation(FacilityInterface facility, AllocationEntity detail); + + /** + * Deletes an allocation + * + * @param alloc + */ + void deleteAllocation(AllocationInterface alloc); + + /** + * Updates the name of the allocation. This method also updates all child host allocation tags so + * you'll need to run allocDao.recalculateTags(alloc) + * + * @param alloc + * @param name + */ + void updateAllocationName(AllocationInterface alloc, String name); + + /** + * Updates the allocation tag. All hosts in the allocation are retagged. + * + * @param a + * @param tag + */ + void updateAllocationTag(AllocationInterface a, String tag); + + /** + * Sets the default allocation, AKA where procs go first. + * + * @param a + */ + void setDefaultAllocation(AllocationInterface a); + + /** + * Returns the current default allocation. + * + * @return + */ + AllocationEntity getDefaultAllocationEntity(); + + /** + * Set the allocation as billable or not billble. + * + * @param alloc + * @param value + */ + void updateAllocationBillable(AllocationInterface alloc, boolean value); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java index f3bb09915..4e2eedf5d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
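Since AllocationDao above is only a contract, a short caller sketch may help reviewers read the reflowed javadoc in context. The wiring below is assumed (constructor injection of whatever AllocationDao implementation Spring provides), and it relies on AllocationEntity satisfying AllocationInterface as it is used elsewhere in cuebot; the class and method names are otherwise illustrative.

  // Hypothetical caller of the AllocationDao contract documented above.
  import com.imageworks.spcue.AllocationEntity;
  import com.imageworks.spcue.dao.AllocationDao;

  public class AllocationRetagSketch {
    private final AllocationDao allocationDao;

    public AllocationRetagSketch(AllocationDao allocationDao) {
      this.allocationDao = allocationDao;
    }

    public void retag(String fullyQualifiedName, String newTag) {
      // Fully qualified allocation names are formatted as facility.name per the javadoc.
      AllocationEntity alloc = allocationDao.findAllocationEntity(fullyQualifiedName);
      // Retags every host in the allocation, as documented for updateAllocationTag().
      allocationDao.updateAllocationTag(alloc, newTag);
    }
  }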
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -29,191 +25,183 @@ public interface BookingDao { - /** - * Updates the maximum number of cores the given local - * host assignment should use. - * - * @param l - * @return - */ - boolean updateMaxCores(LocalHostAssignment l, int maxCoreUnits); - - /** - * Updates the maximum number of gpus the given local - * host assignment should use. - * - * @param l - * @return - */ - boolean updateMaxGpus(LocalHostAssignment l, int gpus); - - /** - * Updates the maximum amount of memory a given local host - * assignment should use. - * - * @param l - * @return - */ - boolean updateMaxMemory(LocalHostAssignment l, long maxMemory); - - /** - * Updates the maximum amount of gpu memory a given local host - * assignment should use. - * - * @param l - * @return - */ - boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory); - - /** - * Create a new LocalHostAssignment attached to the given job. - * - * @param host - * @param job - * @param lha - */ - void insertLocalHostAssignment(HostInterface host, JobInterface job, - LocalHostAssignment lha); - - /** - * Create a new LocalHostAssignment attached to the given layer. - * - * @param host - * @param layer - * @param lha - */ - void insertLocalHostAssignment(HostInterface host, LayerInterface layer, - LocalHostAssignment lha); - - /** - * Create a new LocalHostAssignment attached to the given frame. - * - * @param host - * @param frame - * @param lha - */ - void insertLocalHostAssignment(HostInterface host, FrameInterface frame, - LocalHostAssignment lha); - - /** - * Return the host + jobs local booking assignment properties. - * @param host - * @param job - * @return - */ - List getLocalJobAssignment(HostInterface host); - - /** - * Return the host + jobs local booking assignment properties. - * @param host - * @param job - * @return - */ - LocalHostAssignment getLocalJobAssignment(String id); - - /** - * Return the host + jobs local booking assignment properties. - * @param hostId - * @param jobId - * @return - */ - LocalHostAssignment getLocalJobAssignment(String hostId, String jobId); - - /** - * Return true if the host has a local job assignment. - * - * @param host - * @return - */ - boolean hasLocalJob(HostInterface host); - - /** - * Returns true if the host has an active local booking. - * - * @param host - * @return - */ - boolean hasActiveLocalJob(HostInterface host); - - /** - * Delete the given LocalHostAssignment. - * - * @param e - */ - boolean deleteLocalJobAssignment(LocalHostAssignment lha); - - /** - * Deactivate the given LocalHostAssignment. - * - * @param l - */ - boolean deactivate(LocalHostAssignment l); - - /** - * Return the difference between the number of assigned cores and - * the given coreUnits. - * - * @param l - * @param coreUnits - * @return - */ - int getCoreUsageDifference(LocalHostAssignment l, int coreUnits); - - /** - * Return the difference between the number of assigned gpus and - * the given gpuUnits. - * - * @param l - * @param gpuUnits - * @return - */ - int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits); - - /** - * Allocate additional cores from the given host. 
- * - * @param h - * @param cores - * @return - */ - boolean allocateCoresFromHost(HostInterface h, int cores); - - /** - * Deallocate cores from the given host, returning them to its pool. - * - * @param h - * @param cores - * @return - */ - boolean deallocateCoresFromHost(HostInterface h, int cores); - - /** - * Allocate additional gpus from the given host. - * - * @param h - * @param gpus - * @return - */ - boolean allocateGpusFromHost(HostInterface h, int gpus); - - /** - * Deallocate gpu from the given host, returning them to its pool. - * - * @param h - * @param gpus - * @return - */ - boolean deallocateGpusFromHost(HostInterface h, int gpus); - - /** - * Return true if the Host has a resource deficit. A - * deficit can occur if there are more resources in use than the - * maximum allowed due to changes from the user. - * - * @param l - * @return - */ - boolean hasResourceDeficit(HostInterface host); + /** + * Updates the maximum number of cores the given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxCores(LocalHostAssignment l, int maxCoreUnits); + + /** + * Updates the maximum number of gpus the given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxGpus(LocalHostAssignment l, int gpus); + + /** + * Updates the maximum amount of memory a given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxMemory(LocalHostAssignment l, long maxMemory); + + /** + * Updates the maximum amount of gpu memory a given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory); + + /** + * Create a new LocalHostAssignment attached to the given job. + * + * @param host + * @param job + * @param lha + */ + void insertLocalHostAssignment(HostInterface host, JobInterface job, LocalHostAssignment lha); + + /** + * Create a new LocalHostAssignment attached to the given layer. + * + * @param host + * @param layer + * @param lha + */ + void insertLocalHostAssignment(HostInterface host, LayerInterface layer, LocalHostAssignment lha); + + /** + * Create a new LocalHostAssignment attached to the given frame. + * + * @param host + * @param frame + * @param lha + */ + void insertLocalHostAssignment(HostInterface host, FrameInterface frame, LocalHostAssignment lha); + + /** + * Return the host + jobs local booking assignment properties. + * + * @param host + * @param job + * @return + */ + List getLocalJobAssignment(HostInterface host); + + /** + * Return the host + jobs local booking assignment properties. + * + * @param host + * @param job + * @return + */ + LocalHostAssignment getLocalJobAssignment(String id); + + /** + * Return the host + jobs local booking assignment properties. + * + * @param hostId + * @param jobId + * @return + */ + LocalHostAssignment getLocalJobAssignment(String hostId, String jobId); + + /** + * Return true if the host has a local job assignment. + * + * @param host + * @return + */ + boolean hasLocalJob(HostInterface host); + + /** + * Returns true if the host has an active local booking. + * + * @param host + * @return + */ + boolean hasActiveLocalJob(HostInterface host); + + /** + * Delete the given LocalHostAssignment. + * + * @param e + */ + boolean deleteLocalJobAssignment(LocalHostAssignment lha); + + /** + * Deactivate the given LocalHostAssignment. 
+ * + * @param l + */ + boolean deactivate(LocalHostAssignment l); + + /** + * Return the difference between the number of assigned cores and the given coreUnits. + * + * @param l + * @param coreUnits + * @return + */ + int getCoreUsageDifference(LocalHostAssignment l, int coreUnits); + + /** + * Return the difference between the number of assigned gpus and the given gpuUnits. + * + * @param l + * @param gpuUnits + * @return + */ + int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits); + + /** + * Allocate additional cores from the given host. + * + * @param h + * @param cores + * @return + */ + boolean allocateCoresFromHost(HostInterface h, int cores); + + /** + * Deallocate cores from the given host, returning them to its pool. + * + * @param h + * @param cores + * @return + */ + boolean deallocateCoresFromHost(HostInterface h, int cores); + + /** + * Allocate additional gpus from the given host. + * + * @param h + * @param gpus + * @return + */ + boolean allocateGpusFromHost(HostInterface h, int gpus); + + /** + * Deallocate gpu from the given host, returning them to its pool. + * + * @param h + * @param gpus + * @return + */ + boolean deallocateGpusFromHost(HostInterface h, int gpus); + + /** + * Return true if the Host has a resource deficit. A deficit can occur if there are more resources + * in use than the maximum allowed due to changes from the user. + * + * @param l + * @return + */ + boolean hasResourceDeficit(HostInterface host); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java index 08bea59b1..e0b0acc51 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.CommentDetail; @@ -27,79 +23,79 @@ public interface CommentDao { - /** - * deletes the specified comment. 
- * - * @param id - */ - public void deleteComment(String id); - - /** - * Deletes comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return boolean: returns true if one or more comments where deleted - */ - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); - - /** - * Get comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return List - */ - public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject); - - /** - * Retrieves the specified comment. - * - * @param id - * @return - */ - public CommentDetail getCommentDetail(String id); - - /** - * Inserts a comment on a job - * - * @param job - * @param comment - */ - public void insertComment(JobInterface job, CommentDetail comment); - - /** - * Inserts a comment on a host - * - * @param host - * @param comment - */ - public void insertComment(HostInterface host, CommentDetail comment); - - /** - * Update specified comment - * - * @param comment - */ - public void updateComment(CommentDetail comment); - - /** - * Updates the specified comment's message field with the supplied value. - * - * @param id - * @param message - */ - public void updateCommentMessage(String id, String message); - - /** - * Update the specified comment's subject field with the supplied value. - * - * @param id - * @param subject - */ - public void updateCommentSubject(String id, String subject); + /** + * deletes the specified comment. + * + * @param id + */ + public void deleteComment(String id); + + /** + * Deletes comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return boolean: returns true if one or more comments where deleted + */ + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); + + /** + * Get comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return List + */ + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject); + + /** + * Retrieves the specified comment. + * + * @param id + * @return + */ + public CommentDetail getCommentDetail(String id); + + /** + * Inserts a comment on a job + * + * @param job + * @param comment + */ + public void insertComment(JobInterface job, CommentDetail comment); + + /** + * Inserts a comment on a host + * + * @param host + * @param comment + */ + public void insertComment(HostInterface host, CommentDetail comment); + + /** + * Update specified comment + * + * @param comment + */ + public void updateComment(CommentDetail comment); + + /** + * Updates the specified comment's message field with the supplied value. + * + * @param id + * @param message + */ + public void updateCommentMessage(String id, String message); + + /** + * Update the specified comment's subject field with the supplied value. 
+ * + * @param id + * @param subject + */ + public void updateCommentSubject(String id, String subject); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java index b02b58222..cb2116884 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -27,50 +23,47 @@ public interface DeedDao { - /** - * Create a new deed to the host. - */ - DeedEntity insertDeed(OwnerEntity owner, HostInterface host); + /** + * Create a new deed to the host. + */ + DeedEntity insertDeed(OwnerEntity owner, HostInterface host); - /** - * Delete the given deed. Return true if a row was - * actually deleted, false if one was not. - * - * @param deed - * @return - */ - boolean deleteDeed(DeedEntity deed); + /** + * Delete the given deed. Return true if a row was actually deleted, false if one was not. + * + * @param deed + * @return + */ + boolean deleteDeed(DeedEntity deed); - /** - * Delete the given deed. Return true if a row was - * actually deleted, false if one was not. - * - * @param deed - * @return - */ - boolean deleteDeed(HostInterface host); + /** + * Delete the given deed. Return true if a row was actually deleted, false if one was not. + * + * @param deed + * @return + */ + boolean deleteDeed(HostInterface host); - /** - * Return the deed by its given id. - * - * @param id - * @return - */ - DeedEntity getDeed(String id); + /** + * Return the deed by its given id. + * + * @param id + * @return + */ + DeedEntity getDeed(String id); - /** - * Return all deed's from the given owner. - * - * @param owner - * @return - */ - List getDeeds(OwnerEntity owner); + /** + * Return all deed's from the given owner. 
+ * + * @param owner + * @return + */ + List getDeeds(OwnerEntity owner); - /** - * - * - * @param owner - */ - void deleteDeeds(OwnerEntity owner); + /** + * + * + * @param owner + */ + void deleteDeeds(OwnerEntity owner); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java index 2288983fb..de79d18cf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java @@ -2,79 +2,70 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.DepartmentInterface; /** - * This DAO currently does double duty. It handles the creation, removal, and - * updating of Department and DepartmentConfig entries. + * This DAO currently does double duty. It handles the creation, removal, and updating of Department + * and DepartmentConfig entries. */ public interface DepartmentDao { - /** - * Finds a department by name. Department objects contain only a name and a - * unique ID. - * - * @param name - * @return Department - */ - public DepartmentInterface findDepartment(String name); + /** + * Finds a department by name. Department objects contain only a name and a unique ID. + * + * @param name + * @return Department + */ + public DepartmentInterface findDepartment(String name); - /** - * Finds a department by id. Department objects contain only a name and a - * unique ID. - * - * @param id - * @return Department - */ - public DepartmentInterface getDepartment(String id); + /** + * Finds a department by id. Department objects contain only a name and a unique ID. + * + * @param id + * @return Department + */ + public DepartmentInterface getDepartment(String id); - /** - * Returns the cue's default department. The default department is assigned - * to any job that falls within a group that doesn't have a department. - * Usually this is Unassigned. - * - * @return Department - */ - public DepartmentInterface getDefaultDepartment(); + /** + * Returns the cue's default department. The default department is assigned to any job that falls + * within a group that doesn't have a department. Usually this is Unassigned. 
+ * + * @return Department + */ + public DepartmentInterface getDefaultDepartment(); - /** - * Returns true if the department exists - * - * @param name - * @return - */ - public boolean departmentExists(String name); + /** + * Returns true if the department exists + * + * @param name + * @return + */ + public boolean departmentExists(String name); - /** - * Inserts a new department record. Departments are only a name and a unique - * ID. - * - * @param name - */ - public void insertDepartment(String name); + /** + * Inserts a new department record. Departments are only a name and a unique ID. + * + * @param name + */ + public void insertDepartment(String name); - /** - * Removes the specified department. - * - * @param d - */ - public void deleteDepartment(DepartmentInterface d); + /** + * Removes the specified department. + * + * @param d + */ + public void deleteDepartment(DepartmentInterface d); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java index 3840b6e8d..053c20cf2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -46,185 +42,171 @@ */ public interface DependDao { - /** - * Returns a LightweightDependency from its ID - * - * @param id - * @return LightweightDependency - */ - LightweightDependency getDepend(String id); - - /** - * Returns a LightweightDependency from its ID - * - * @param signature - * @return LightweightDependency - */ - LightweightDependency getDependBySignature(String s); - - /** - * Gets a list of LightweightDependenies that depend on - * the specified job - * - * @param job - * @return List - */ - List getWhatDependsOn(JobInterface job); - - /** - * Get a list of LightweightDependenies that depend on this job - * and are either intenral, external, or either. - * The depends returned can depend on any part of the job. 
- * - * @param job - * @param target - * @return - */ - List getWhatDependsOn(JobInterface job, DependTarget target); - - /** - * Gets a list of LightweightDependencies that depend on - * the specified layer - * - * @param job - * @param layer - * @return List - */ - List getWhatDependsOn(LayerInterface layer); - - /** - * Gets a list of LightweightDependencies that depend on - * the specified frame - * - * @param frame - * @return - */ - List getWhatDependsOn(FrameInterface frame); - - /** - * Deletes a dependency - * - * @param depend - */ - void deleteDepend(LightweightDependency depend); - - /** - * Returns a list of depends where the specified job is the depender. Passing a - * depend target will limit the results to either internal or external. This - * method returns active depends only. - * - * @param Job - * @param DependTarget - * @return List - */ - List getWhatThisDependsOn(JobInterface job, DependTarget target); - - /** - * Returns a list of depends the layer depends on. Passing in a depend - * target will limit the results to either internal, external or both. - * This method returns active depends only. - * - * @param Layer - * @return List - */ - List getWhatThisDependsOn(LayerInterface layer, DependTarget target); - - /** - * Returns a list of depends the frame depends on. Passing in a depend - * target will limit the results to either inernal, external, or both.This - * method returns active depends only. - * - * @param Frame - * @return List - */ - List getWhatThisDependsOn(FrameInterface frame, DependTarget target); - - /** - * Returns a list of dependencies where the supplied frame is the element - * being depended on. - * - * @param frame - * @param active - * @return - */ - List getWhatDependsOn(FrameInterface frame, boolean active); - - - /** - * - * @param layer - * @param active - * @return - */ - List getWhatDependsOn(LayerInterface layer, boolean active); - - /** - * Returns a list of child FrameByFrame dependencies - * - * @param depend - * @return - */ - List getChildDepends(LightweightDependency depend); - - void insertDepend(JobOnJob d); - - void insertDepend(JobOnLayer d); - - void insertDepend(JobOnFrame d); - - void insertDepend(LayerOnJob d); - - void insertDepend(LayerOnLayer d); - - void insertDepend(LayerOnFrame d); - - void insertDepend(FrameOnJob d); - - void insertDepend(FrameOnLayer d); - - void insertDepend(FrameByFrame d); - - void insertDepend(FrameOnFrame d); - - void insertDepend(PreviousFrame d); - - void updateFrameState(FrameInterface f); - - /** - * Increment the depend count for the specified frame. - * - * @param f - * @throws DependException if the depend count was not - * incremented. - */ - void incrementDependCount(FrameInterface f); - - /** - * Decrement the depend count for the specified frame. - * Return false if the depend count is already 0, true - * if the depend count was decremented. - * - * @param f - */ - boolean decrementDependCount(FrameInterface f); - - /** - * Returns true if this is the thread that set - * the depend to inactive. - * - * @param depend - * @return - */ - boolean setInactive(LightweightDependency depend); - - /** - * Sets a dependency as active. If the dependency is - * already active return false, otherwise return true. - * Currently this only works on FrameOnFrame and LayerOnLayer. - * - * @param depend - * @return true if this thread actually updated the row. 
- */ - boolean setActive(LightweightDependency depend); + /** + * Returns a LightweightDependency from its ID + * + * @param id + * @return LightweightDependency + */ + LightweightDependency getDepend(String id); + + /** + * Returns a LightweightDependency from its ID + * + * @param signature + * @return LightweightDependency + */ + LightweightDependency getDependBySignature(String s); + + /** + * Gets a list of LightweightDependenies that depend on the specified job + * + * @param job + * @return List + */ + List getWhatDependsOn(JobInterface job); + + /** + * Get a list of LightweightDependenies that depend on this job and are either intenral, external, + * or either. The depends returned can depend on any part of the job. + * + * @param job + * @param target + * @return + */ + List getWhatDependsOn(JobInterface job, DependTarget target); + + /** + * Gets a list of LightweightDependencies that depend on the specified layer + * + * @param job + * @param layer + * @return List + */ + List getWhatDependsOn(LayerInterface layer); + + /** + * Gets a list of LightweightDependencies that depend on the specified frame + * + * @param frame + * @return + */ + List getWhatDependsOn(FrameInterface frame); + + /** + * Deletes a dependency + * + * @param depend + */ + void deleteDepend(LightweightDependency depend); + + /** + * Returns a list of depends where the specified job is the depender. Passing a depend target will + * limit the results to either internal or external. This method returns active depends only. + * + * @param Job + * @param DependTarget + * @return List + */ + List getWhatThisDependsOn(JobInterface job, DependTarget target); + + /** + * Returns a list of depends the layer depends on. Passing in a depend target will limit the + * results to either internal, external or both. This method returns active depends only. + * + * @param Layer + * @return List + */ + List getWhatThisDependsOn(LayerInterface layer, DependTarget target); + + /** + * Returns a list of depends the frame depends on. Passing in a depend target will limit the + * results to either inernal, external, or both.This method returns active depends only. + * + * @param Frame + * @return List + */ + List getWhatThisDependsOn(FrameInterface frame, DependTarget target); + + /** + * Returns a list of dependencies where the supplied frame is the element being depended on. + * + * @param frame + * @param active + * @return + */ + List getWhatDependsOn(FrameInterface frame, boolean active); + + /** + * + * @param layer + * @param active + * @return + */ + List getWhatDependsOn(LayerInterface layer, boolean active); + + /** + * Returns a list of child FrameByFrame dependencies + * + * @param depend + * @return + */ + List getChildDepends(LightweightDependency depend); + + void insertDepend(JobOnJob d); + + void insertDepend(JobOnLayer d); + + void insertDepend(JobOnFrame d); + + void insertDepend(LayerOnJob d); + + void insertDepend(LayerOnLayer d); + + void insertDepend(LayerOnFrame d); + + void insertDepend(FrameOnJob d); + + void insertDepend(FrameOnLayer d); + + void insertDepend(FrameByFrame d); + + void insertDepend(FrameOnFrame d); + + void insertDepend(PreviousFrame d); + + void updateFrameState(FrameInterface f); + + /** + * Increment the depend count for the specified frame. + * + * @param f + * @throws DependException if the depend count was not incremented. + */ + void incrementDependCount(FrameInterface f); + + /** + * Decrement the depend count for the specified frame. 
Return false if the depend count is already + * 0, true if the depend count was decremented. + * + * @param f + */ + boolean decrementDependCount(FrameInterface f); + + /** + * Returns true if this is the thread that set the depend to inactive. + * + * @param depend + * @return + */ + boolean setInactive(LightweightDependency depend); + + /** + * Sets a dependency as active. If the dependency is already active return false, otherwise return + * true. Currently this only works on FrameOnFrame and LayerOnLayer. + * + * @param depend + * @return true if this thread actually updated the row. + */ + boolean setActive(LightweightDependency depend); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java index 2ac3a91df..1cf0932b0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -32,170 +28,158 @@ import com.imageworks.spcue.VirtualProc; /** -* DispatcherDao provides DAO methods used by the DispatchService -*/ + * DispatcherDao provides DAO methods used by the DispatchService + */ public interface DispatcherDao { - /** - * Finds the next frame on the specified job that can utilize - * the free resources on the host. - * - * @param host - * @param job - * @return - */ - DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host); - - /** - * Returns the next frame based on the supplied job - * - * @param job - * @param proc - * @return DispatchFrame - */ - DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc); - - /** - * Finds the next frame on the specified job that can utilize - * the free resources on the host. - * - * @param host - * @param job - * @return - */ - List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); - - /** - * Returns the next frame based on the supplied job - * - * @param job - * @param proc - * @return DispatchFrame - */ - List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); - - /** - * Return a list of jobs which could use resources of the specified - * host. It does not consider show priority. 
- * - * @param host - * @param numJobs - * @return - */ - Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); - - /** - * Return a list of jobs which could use resources of the specified - * host - * - * @param host - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, int numJobs); - - /** - * Return a list of jobs which could use resources of the specified - * host that are in the specified group. - * - * @param host - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, GroupInterface g); - - /** - * Finds an under proced job if one exists and returns it, - * otherwise it returns null. - * - * @param excludeJob - * @param proc - * @return - */ - boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); - - /** - * Returns true if there exists a higher priority job than the base job - * - * @param baseJob - * @param proc - * @return boolean - */ - boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); - - /** - * Dispatch the given host to the specified show. Look for a max of numJobs. - * - * @param host - * @param show - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); - - /** - * Find a list of local dispatch jobs. - * - * @param host - * @return - */ - Set findLocalDispatchJobs(DispatchHost host); - - /** - * Return a list of frames from the given layer. - * - * @param layer - * @param proc - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, - int limit); - - /** - * Return a list of frames from the given layer. - * - * @param layer - * @param host - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, DispatchHost host, - int limit); - - /** - * Return Scheduling Mode selected - * - * @return - */ - SchedulingMode getSchedulingMode(); - - /** - * Set Scheduling Mode. - * - * @param schedulingMode - */ - void setSchedulingMode(SchedulingMode schedulingMode); - - /** - * - PRIORITY_ONLY: Sort by priority only - * - FIFO: Whether or not to enable FIFO scheduling in the same priority. - * - BALANCED: Use a rank formula that takes into account time waiting, and number - * of cores required: rank = priority + (100 * (1 - (job.cores/job.int_min_cores))) + age in days - */ - enum SchedulingMode { - PRIORITY_ONLY, - FIFO, - BALANCED - } - - /** - * Clear bookableShows cache - * - * @return - */ - void clearCache(); + /** + * Finds the next frame on the specified job that can utilize the free resources on the host. + * + * @param host + * @param job + * @return + */ + DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host); + + /** + * Returns the next frame based on the supplied job + * + * @param job + * @param proc + * @return DispatchFrame + */ + DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc); + + /** + * Finds the next frame on the specified job that can utilize the free resources on the host. + * + * @param host + * @param job + * @return + */ + List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); + + /** + * Returns the next frame based on the supplied job + * + * @param job + * @param proc + * @return DispatchFrame + */ + List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); + + /** + * Return a list of jobs which could use resources of the specified host. It does not consider + * show priority. 
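+ *
+ * <p>A minimal usage sketch; the {@code dispatcherDao} and {@code host} references, the count,
+ * and the job-id element type of the returned set are illustrative assumptions:
+ *
+ * <pre>{@code
+ * // Ask for up to five candidate jobs that could use this host's spare resources.
+ * Set<String> jobIds = dispatcherDao.findDispatchJobsForAllShows(host, 5);
+ * }</pre>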
+ * + * @param host + * @param numJobs + * @return + */ + Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); + + /** + * Return a list of jobs which could use resources of the specified host + * + * @param host + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, int numJobs); + + /** + * Return a list of jobs which could use resources of the specified host that are in the specified + * group. + * + * @param host + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, GroupInterface g); + + /** + * Finds an under proced job if one exists and returns it, otherwise it returns null. + * + * @param excludeJob + * @param proc + * @return + */ + boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); + + /** + * Returns true if there exists a higher priority job than the base job + * + * @param baseJob + * @param proc + * @return boolean + */ + boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); + + /** + * Dispatch the given host to the specified show. Look for a max of numJobs. + * + * @param host + * @param show + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); + + /** + * Find a list of local dispatch jobs. + * + * @param host + * @return + */ + Set findLocalDispatchJobs(DispatchHost host); + + /** + * Return a list of frames from the given layer. + * + * @param layer + * @param proc + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, int limit); + + /** + * Return a list of frames from the given layer. + * + * @param layer + * @param host + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); + + /** + * Return Scheduling Mode selected + * + * @return + */ + SchedulingMode getSchedulingMode(); + + /** + * Set Scheduling Mode. + * + * @param schedulingMode + */ + void setSchedulingMode(SchedulingMode schedulingMode); + + /** + * - PRIORITY_ONLY: Sort by priority only - FIFO: Whether or not to enable FIFO scheduling in the + * same priority. - BALANCED: Use a rank formula that takes into account time waiting, and number + * of cores required: rank = priority + (100 * (1 - (job.cores/job.int_min_cores))) + age in days + */ + enum SchedulingMode { + PRIORITY_ONLY, FIFO, BALANCED + } + + /** + * Clear bookableShows cache + * + * @return + */ + void clearCache(); } - - - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java index 90c65859c..0d269f672 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.FacilityEntity; @@ -24,52 +20,51 @@ public interface FacilityDao { - /** - * Returns the default facility - * - * @return - */ - public FacilityInterface getDefaultFacility(); + /** + * Returns the default facility + * + * @return + */ + public FacilityInterface getDefaultFacility(); - /** - * Gets a facility by Id - * - * @param id - * @return - */ - public FacilityInterface getFacility(String id); + /** + * Gets a facility by Id + * + * @param id + * @return + */ + public FacilityInterface getFacility(String id); - /** - * Returns true if a facility exists - * - * @param name - * @return - */ - public boolean facilityExists(String name); + /** + * Returns true if a facility exists + * + * @param name + * @return + */ + public boolean facilityExists(String name); - /** - * Insert and return a facility. - * - * @param name - * @return - */ - public FacilityInterface insertFacility(FacilityEntity facility); + /** + * Insert and return a facility. + * + * @param name + * @return + */ + public FacilityInterface insertFacility(FacilityEntity facility); - /** - * Deletes a facility record, if possible. - * - * @param facility - * @return - */ - public int deleteFacility(FacilityInterface facility); + /** + * Deletes a facility record, if possible. + * + * @param facility + * @return + */ + public int deleteFacility(FacilityInterface facility); - /** - * Rename the specified facility. - * - * @param facility - * @param name - * @return - */ - int updateFacilityName(FacilityInterface facility, String name); + /** + * Rename the specified facility. + * + * @param facility + * @param name + * @return + */ + int updateFacilityName(FacilityInterface facility, String name); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java index 61b65ff75..8de1c83fe 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -33,27 +29,32 @@ */ public interface FilterDao { - List getActiveFilters(ShowInterface show); - List getFilters(ShowInterface show); + List getActiveFilters(ShowInterface show); - void updateSetFilterEnabled(FilterInterface f, boolean enabled); - void updateSetFilterName(FilterInterface f, String name); - void updateSetFilterType(FilterInterface f, FilterType type); + List getFilters(ShowInterface show); - void updateSetFilterOrder(FilterInterface f, double order); + void updateSetFilterEnabled(FilterInterface f, boolean enabled); - void deleteFilter(FilterInterface f); - void insertFilter(FilterEntity f); + void updateSetFilterName(FilterInterface f, String name); - void reorderFilters(ShowInterface s); + void updateSetFilterType(FilterInterface f, FilterType type); - void lowerFilterOrder(FilterInterface f, int by); - void raiseFilterOrder(FilterInterface f, int by); + void updateSetFilterOrder(FilterInterface f, double order); - FilterEntity getFilter(String id); - FilterEntity getFilter(FilterInterface filter); - FilterEntity findFilter(ShowInterface show, String name); + void deleteFilter(FilterInterface f); + void insertFilter(FilterEntity f); -} + void reorderFilters(ShowInterface s); + + void lowerFilterOrder(FilterInterface f, int by); + + void raiseFilterOrder(FilterInterface f, int by); + FilterEntity getFilter(String id); + + FilterEntity getFilter(FilterInterface filter); + + FilterEntity findFilter(ShowInterface show, String name); + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java index 64a52e144..20ce02fbf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao; import java.util.List; @@ -39,375 +35,357 @@ public interface FrameDao { - /** - * finds the frame in the job that used the lowest - * amount of memory - * - * @param job - * @return - */ - public FrameDetail findLowestMemoryFrame(JobInterface job); - - /** - * finds the frame in the job that used the highest - * amount of memory, - * - * @param job - * @return - */ - public FrameDetail findHighestMemoryFrame(JobInterface job); - - /** - * Returns the data for the shortest succeeded frame. - * - * @param job - * @return - */ - public FrameDetail findShortestFrame(JobInterface job); - - /** - * Returns the data for the longest succeeded frame. - * - * @param job - * @return - */ - public FrameDetail findLongestFrame(JobInterface job); - - /** - * Checks to see how many retries a frame has. If that number - * is greater than or equal to the jobs max retries, the frame - * is marked as dead. - * - * @param frame - */ - void checkRetries(FrameInterface frame); - - /** - * Batch inserts a frameSet of frames. - * - * @param frame - */ - void insertFrames(LayerDetail layer, List frames); - - /** - * Retrieve a FrameDetail from something that implements Frame - * - * @param frame - * @return FrameDetail - */ - FrameDetail getFrameDetail(FrameInterface frame); - - /** - * Retrieve a FrameDetail from its unique ID. - * - * @param id - * @return FrameDetail - */ - FrameDetail getFrameDetail(String id); - - /** - * - * @param job - * @param name - * @return - */ - FrameDetail findFrameDetail(JobInterface job, String name); - - /** - * Returns a minimal Frame from its ID - * - * @param id - * @return Frame - */ - FrameInterface getFrame(String id); - - /** - * Finds a minimal frame from its job and frame name - * - * @param job - * @param name - * @return Frame - */ - FrameInterface findFrame(JobInterface job, String name); - - /** - * Finds a minimal frame from its layer and number. - * - * @param job - * @param name - * @return Frame - */ - FrameInterface findFrame(LayerInterface layer, int number); - - /** - * Find a list of minimal frames from a job and FrameLookupRequest. - * - * @param job - * @param r - * @return List - */ - List findFrames(FrameSearchInterface r); - - /** - * Find a list of FrameDetail objects from a job and FrameLookupRequest. - * - * @param job - * @param r - * @return List - */ - List findFrameDetails(FrameSearchInterface r); - - /** - * Updates the specified frame's state. - * - * @param frame - * @param state - */ - boolean updateFrameState(FrameInterface frame, FrameState state); - - /** - * Updates a frame to indicate its now running. - * - * @param proc - * @param frame - * @return - */ - void updateFrameStarted(VirtualProc proc, FrameInterface frame); - - /** - * Updates a frame to the stopped state. The frame MUST be - * in the Running state to be stopped. - * - * @param proc - * @param frame - * @param report - */ - boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus); - - /** - * Updates a frame to the stopped state. The frame MUST be - * in the Running state to be stopped. - * - * @param frame - * @param state - * @param exitStatus - * @param maxRss - * @return - */ - boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, - long maxRss); - - /** - * Sets a frame to an unreserved waiting state. 
- * - * @param frame - * @return - */ - boolean updateFrameCleared(FrameInterface frame); - /** - * Sets a frame exitStatus to EXIT_STATUS_MEMORY_FAILURE - * - * @param frame - * @return whether the frame has been updated - */ - boolean updateFrameMemoryError(FrameInterface frame); - - /** - * Sets a frame to an unreserved waiting state. - * - * @param frame - * @return - */ - boolean updateFrameHostDown(FrameInterface frame); - - /** - * Returns a DispatchFrame object from the frame's uinique ID. - * - * @param uuid - * @return DispatchFrame - */ - DispatchFrame getDispatchFrame(String uuid); - - /** - * Set the specified frame to the Waiting state and its - * depend count to 0. - * - * @param frame - */ - void markFrameAsWaiting(FrameInterface frame); - - /** - * If the specified frame has active dependencies, reset - * the dependency count and set the frame state to Depend - * - * @param frame - */ - void markFrameAsDepend(FrameInterface frame); - - /** - * Reverses the specified frame range. The revese layer implementation is - * is more intensive than other reorder operations because we look up - * the dispatch order for each frame and then switch them. - * - * @param layer - * @param frameSet - */ - public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet); - - /** - * - * Reorders specified frames to the end of the dispatch order. - * This works by finding the frame with the highest dispatch - * value, and updating the specified frames with higher values. - * The rest of the frames in the layer are not touched. - * - * @param layer - * @param frameSet - */ - public void reorderFramesLast(LayerInterface layer, FrameSet frameSet); - - /** - * Reorders specified frames to the top of the dispatch order. - * This works by finding the frame with the lowest dispatch - * order and updating targeted frames with even lower dispatcher orders, - * negative numbers are allowed. - * - * @param layer - * @param frameSet - */ - public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet); - - /** - * This would reorder frames so that it would render the specified - * sequence on a staggered frame range. The frame set must be - * a staggered range. - * - * @param layer - * @param frameSet - */ - public void staggerLayer(LayerInterface layer, String range, int stagger); - - /** - * Returns a list of Running frames that have not had a proc - * assigned to them in over 5 min. This can happen when an - * operation aborts due to a deadlock. - * - * @return - */ - List getOrphanedFrames(); - - /** - * Return a list of all frames that have positive dependency - * counts for the specified dependency. - * - * @param depend - * @return - */ - List getDependentFrames(LightweightDependency depend); - - /** - * Returns true if the frame is succeeded. - * - * @param f - * @return - */ - public boolean isFrameComplete(FrameInterface f); - - /** - * Attempts to fix the case where a proc is assigned to a frame - * but the frame is in the waiting state. - * - * @param proc - * @param frame - * @return - */ - boolean updateFrameFixed(VirtualProc proc, FrameInterface frame); - - /** - * Return a ResourceUsage object which repesents the amount - * of clock and core time the frame has used up until this point. - * - * @param f - * @return - */ - ResourceUsage getResourceUsage(FrameInterface f); - - /** - * Update memory usage values and LLU time for the given frame. The - * frame must be in the Running state. 
If the frame - * is locked by another thread, the process is aborted because - * we'll most likely get a new update one minute later. - * - * @param f - * @param maxRss - * @param rss - * @param lluTime - * @throws FrameReservationException if the frame is locked - * by another thread. - */ - void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, long lluTime); - - /** - * Attempt to put a exclusive row lock on the given - * frame. The frame must be in the specified state. - * - * @param frame - * @param state - * @throws FrameReservationException if the frame changes state before - * the lock can be applied. - */ - void lockFrameForUpdate(FrameInterface frame, FrameState state); - - /** - * Return true if the specified frame is an orphan. - * - * @param frame - * @return - */ - boolean isOrphan(FrameInterface frame); - - /** - * Update a frame's checkpoint state status. - * - * @param frame - * @param state - * @return - */ - boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state); - - /** - * Return a list of checkpoints that have failed to report back in - * within a certain cutoff time. - * - * @param cutoffTime - * @return - */ - List getStaleCheckpoints(int cutoffTimeMs); - - /** - * Create a frame state display override. - * - * @param frameId String - * @param override FrameStateDisplayOverride - */ - void setFrameStateDisplayOverride(String frameId, - FrameStateDisplayOverride override); - - /** - * Get the frame overrides for a specific frame - * - * @param frameId - * @return List - */ - FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId); - - /** - * Update a frame override with new text/color - * - * @param frameId - * @param override FrameStateDisplayOverride - */ - void updateFrameStateDisplayOverride(String frameId, - FrameStateDisplayOverride override); + /** + * finds the frame in the job that used the lowest amount of memory + * + * @param job + * @return + */ + public FrameDetail findLowestMemoryFrame(JobInterface job); + + /** + * finds the frame in the job that used the highest amount of memory, + * + * @param job + * @return + */ + public FrameDetail findHighestMemoryFrame(JobInterface job); + + /** + * Returns the data for the shortest succeeded frame. + * + * @param job + * @return + */ + public FrameDetail findShortestFrame(JobInterface job); + + /** + * Returns the data for the longest succeeded frame. + * + * @param job + * @return + */ + public FrameDetail findLongestFrame(JobInterface job); + + /** + * Checks to see how many retries a frame has. If that number is greater than or equal to the jobs + * max retries, the frame is marked as dead. + * + * @param frame + */ + void checkRetries(FrameInterface frame); + + /** + * Batch inserts a frameSet of frames. + * + * @param frame + */ + void insertFrames(LayerDetail layer, List frames); + + /** + * Retrieve a FrameDetail from something that implements Frame + * + * @param frame + * @return FrameDetail + */ + FrameDetail getFrameDetail(FrameInterface frame); + + /** + * Retrieve a FrameDetail from its unique ID. 
+ * + * @param id + * @return FrameDetail + */ + FrameDetail getFrameDetail(String id); + + /** + * + * @param job + * @param name + * @return + */ + FrameDetail findFrameDetail(JobInterface job, String name); + + /** + * Returns a minimal Frame from its ID + * + * @param id + * @return Frame + */ + FrameInterface getFrame(String id); + + /** + * Finds a minimal frame from its job and frame name + * + * @param job + * @param name + * @return Frame + */ + FrameInterface findFrame(JobInterface job, String name); + + /** + * Finds a minimal frame from its layer and number. + * + * @param job + * @param name + * @return Frame + */ + FrameInterface findFrame(LayerInterface layer, int number); + + /** + * Find a list of minimal frames from a job and FrameLookupRequest. + * + * @param job + * @param r + * @return List + */ + List findFrames(FrameSearchInterface r); + + /** + * Find a list of FrameDetail objects from a job and FrameLookupRequest. + * + * @param job + * @param r + * @return List + */ + List findFrameDetails(FrameSearchInterface r); + + /** + * Updates the specified frame's state. + * + * @param frame + * @param state + */ + boolean updateFrameState(FrameInterface frame, FrameState state); + + /** + * Updates a frame to indicate it is now running. + * + * @param proc + * @param frame + * @return + */ + void updateFrameStarted(VirtualProc proc, FrameInterface frame); + + /** + * Updates a frame to the stopped state. The frame MUST be in the Running state to be stopped. + * + * @param proc + * @param frame + * @param report + */ + boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus); + + /** + * Updates a frame to the stopped state. The frame MUST be in the Running state to be stopped. + * + * @param frame + * @param state + * @param exitStatus + * @param maxRss + * @return + */ + boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, long maxRss); + + /** + * Sets a frame to an unreserved waiting state. + * + * @param frame + * @return + */ + boolean updateFrameCleared(FrameInterface frame); + + /** + * Sets a frame exitStatus to EXIT_STATUS_MEMORY_FAILURE + * + * @param frame + * @return whether the frame has been updated + */ + boolean updateFrameMemoryError(FrameInterface frame); + + /** + * Sets a frame to an unreserved waiting state. + * + * @param frame + * @return + */ + boolean updateFrameHostDown(FrameInterface frame); + + /** + * Returns a DispatchFrame object from the frame's unique ID. + * + * @param uuid + * @return DispatchFrame + */ + DispatchFrame getDispatchFrame(String uuid); + + /** + * Set the specified frame to the Waiting state and its depend count to 0. + * + * @param frame + */ + void markFrameAsWaiting(FrameInterface frame); + + /** + * If the specified frame has active dependencies, reset the dependency count and set the frame + * state to Depend + * + * @param frame + */ + void markFrameAsDepend(FrameInterface frame); + + /** + * Reverses the specified frame range. The reverse layer implementation is more intensive than + * other reorder operations because we look up the dispatch order for each frame and then switch + * them. + * + * @param layer + * @param frameSet + */ + public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet); + + /** + * + * Reorders specified frames to the end of the dispatch order. This works by finding the frame + * with the highest dispatch value, and updating the specified frames with higher values. The rest + * of the frames in the layer are not touched.
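+ *
+ * <p>A minimal usage sketch; the {@code frameDao} and {@code layer} references and the
+ * range-string {@code FrameSet} constructor are illustrative assumptions:
+ *
+ * <pre>{@code
+ * // Push frames 1-10 of the layer to the back of the dispatch order.
+ * frameDao.reorderFramesLast(layer, new FrameSet("1-10"));
+ * }</pre>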
+ * + * @param layer + * @param frameSet + */ + public void reorderFramesLast(LayerInterface layer, FrameSet frameSet); + + /** + * Reorders specified frames to the top of the dispatch order. This works by finding the frame + * with the lowest dispatch order and updating targeted frames with even lower dispatcher orders, + * negative numbers are allowed. + * + * @param layer + * @param frameSet + */ + public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet); + + /** + * This would reorder frames so that it would render the specified sequence on a staggered frame + * range. The frame set must be a staggered range. + * + * @param layer + * @param frameSet + */ + public void staggerLayer(LayerInterface layer, String range, int stagger); + + /** + * Returns a list of Running frames that have not had a proc assigned to them in over 5 min. This + * can happen when an operation aborts due to a deadlock. + * + * @return + */ + List getOrphanedFrames(); + + /** + * Return a list of all frames that have positive dependency counts for the specified dependency. + * + * @param depend + * @return + */ + List getDependentFrames(LightweightDependency depend); + + /** + * Returns true if the frame is succeeded. + * + * @param f + * @return + */ + public boolean isFrameComplete(FrameInterface f); + + /** + * Attempts to fix the case where a proc is assigned to a frame but the frame is in the waiting + * state. + * + * @param proc + * @param frame + * @return + */ + boolean updateFrameFixed(VirtualProc proc, FrameInterface frame); + + /** + * Return a ResourceUsage object which represents the amount of clock and core time the frame has + * used up until this point. + * + * @param f + * @return + */ + ResourceUsage getResourceUsage(FrameInterface f); + + /** + * Update memory usage values and LLU time for the given frame. The frame must be in the Running + * state. If the frame is locked by another thread, the process is aborted because we'll most + * likely get a new update one minute later. + * + * @param f + * @param maxRss + * @param rss + * @param lluTime + * @throws FrameReservationException if the frame is locked by another thread. + */ + void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, long lluTime); + + /** + * Attempt to put an exclusive row lock on the given frame. The frame must be in the specified + * state. + * + * @param frame + * @param state + * @throws FrameReservationException if the frame changes state before the lock can be applied. + */ + void lockFrameForUpdate(FrameInterface frame, FrameState state); + + /** + * Return true if the specified frame is an orphan. + * + * @param frame + * @return + */ + boolean isOrphan(FrameInterface frame); + + /** + * Update a frame's checkpoint state status. + * + * @param frame + * @param state + * @return + */ + boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state); + + /** + * Return a list of checkpoints that have failed to report back in within a certain cutoff time. + * + * @param cutoffTime + * @return + */ + List getStaleCheckpoints(int cutoffTimeMs); + + /** + * Create a frame state display override.
+ * + * @param frameId String + * @param override FrameStateDisplayOverride + */ + void setFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override); + + /** + * Get the frame overrides for a specific frame + * + * @param frameId + * @return List + */ + FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId); + + /** + * Update a frame override with new text/color + * + * @param frameId + * @param override FrameStateDisplayOverride + */ + void updateFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java index 87cd950d0..cedae2516 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -34,205 +30,205 @@ */ public interface GroupDao { - /** - * returns the group from its unique id - * - * @param id - * @return - */ - GroupInterface getGroup(String id); - - /** - * returns a list of groups using their unique ids - * @param id - * @return - */ - List getGroups(List id); - - /** - * - * @param show - * @return - */ - GroupDetail getRootGroupDetail(ShowInterface show); - - /** - * Returns the show's root group. - * - * @param show - * @return - */ - String getRootGroupId(ShowInterface show); - - /** - * Insert group into specified parent - * - * @param group - */ - void insertGroup(GroupDetail group, GroupInterface parent); - - /** - * - * @param group - */ - void insertGroup(GroupDetail group); - - /** - * Updates the groups department. - * - * @param group - * @param dept - */ - void updateDepartment(GroupInterface group, DepartmentInterface dept); - - /** - * Removes the specified group. You cannot delete a group that contains - * jobs or other groups or the shows root folder. - * - * @param group - */ - void deleteGroup(GroupInterface group); - - /** - * Sets the group's new parent. Triggers will handle any recursive level - * changes. - * - * @param group - * @param parent - * - * @throws EntityModificationError throws this if the group is the top level group - * which cannot be parented to another group. 
- */ - void updateGroupParent(GroupInterface group, GroupInterface parent); - - /** - * Sets the maximum number of procs the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMaxCores(GroupInterface group, int value); - - /** - * Sets the minimum number of procs the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMinCores(GroupInterface group, int value); - - /** - * Sets the maximum number of cores for this group - * - * @param group - * @param value - */ - public void updateMaxCores(GroupInterface group, int value); - - /** - * Set the minimum number of cores for this group - * - * @param group - * @param value - */ - - public void updateMinCores(GroupInterface group, int value); - - /** - * Sets the maximum number of gpus the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMaxGpus(GroupInterface group, int value); - - /** - * Sets the minimum number of gpus the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMinGpus(GroupInterface group, int value); - - /** - * Sets the maximum number of gpus for this group - * - * @param group - * @param value - */ - public void updateMaxGpus(GroupInterface group, int value); - - /** - * Set the minimum number of gpus for this group - * - * @param group - * @param value - */ - - public void updateMinGpus(GroupInterface group, int value); - - /** - * Renames the group - * - * @param group - * @param value - */ - void updateName(GroupInterface group, String value); - - /** - * Updates a group's priority. - * - * @param group - * @param value - */ - void updateDefaultJobPriority(GroupInterface group, int value); - - /** - * Returns a full GroupDetail object from its unique id - * - * @param id - * @return - */ - GroupDetail getGroupDetail(String id); - - /** - * Returns a recursive list of a group's children - * - * @param group - * @return - */ - List getChildrenRecursive(GroupInterface group); - - /** - * - * Returns a list of a groups immediate children - * - * @param group - * @return - */ - List getChildren(GroupInterface group); - - /** - * Returns true if the group of the specified job is at or over its min proc - * - * @param job - * @return - */ - boolean isOverMinCores(JobInterface job); - - /** - * Returns true if the group is managed. - * - * @param group - * @return - */ - boolean isManaged(GroupInterface group); - - /** - * Return a GroupDetail for the specified job. - * @param job - * @return - */ - GroupDetail getGroupDetail(JobInterface job); + /** + * returns the group from its unique id + * + * @param id + * @return + */ + GroupInterface getGroup(String id); + + /** + * returns a list of groups using their unique ids + * + * @param id + * @return + */ + List getGroups(List id); + + /** + * + * @param show + * @return + */ + GroupDetail getRootGroupDetail(ShowInterface show); + + /** + * Returns the show's root group. + * + * @param show + * @return + */ + String getRootGroupId(ShowInterface show); + + /** + * Insert group into specified parent + * + * @param group + */ + void insertGroup(GroupDetail group, GroupInterface parent); + + /** + * + * @param group + */ + void insertGroup(GroupDetail group); + + /** + * Updates the groups department. + * + * @param group + * @param dept + */ + void updateDepartment(GroupInterface group, DepartmentInterface dept); + + /** + * Removes the specified group. 
You cannot delete a group that contains jobs or other groups or + * the shows root folder. + * + * @param group + */ + void deleteGroup(GroupInterface group); + + /** + * Sets the group's new parent. Triggers will handle any recursive level changes. + * + * @param group + * @param parent + * + * @throws EntityModificationError throws this if the group is the top level group which cannot be + * parented to another group. + */ + void updateGroupParent(GroupInterface group, GroupInterface parent); + + /** + * Sets the maximum number of procs the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMaxCores(GroupInterface group, int value); + + /** + * Sets the minimum number of procs the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMinCores(GroupInterface group, int value); + + /** + * Sets the maximum number of cores for this group + * + * @param group + * @param value + */ + public void updateMaxCores(GroupInterface group, int value); + + /** + * Set the minimum number of cores for this group + * + * @param group + * @param value + */ + + public void updateMinCores(GroupInterface group, int value); + + /** + * Sets the maximum number of gpus the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMaxGpus(GroupInterface group, int value); + + /** + * Sets the minimum number of gpus the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMinGpus(GroupInterface group, int value); + + /** + * Sets the maximum number of gpus for this group + * + * @param group + * @param value + */ + public void updateMaxGpus(GroupInterface group, int value); + + /** + * Set the minimum number of gpus for this group + * + * @param group + * @param value + */ + + public void updateMinGpus(GroupInterface group, int value); + + /** + * Renames the group + * + * @param group + * @param value + */ + void updateName(GroupInterface group, String value); + + /** + * Updates a group's priority. + * + * @param group + * @param value + */ + void updateDefaultJobPriority(GroupInterface group, int value); + + /** + * Returns a full GroupDetail object from its unique id + * + * @param id + * @return + */ + GroupDetail getGroupDetail(String id); + + /** + * Returns a recursive list of a group's children + * + * @param group + * @return + */ + List getChildrenRecursive(GroupInterface group); + + /** + * + * Returns a list of a groups immediate children + * + * @param group + * @return + */ + List getChildren(GroupInterface group); + + /** + * Returns true if the group of the specified job is at or over its min proc + * + * @param job + * @return + */ + boolean isOverMinCores(JobInterface job); + + /** + * Returns true if the group is managed. + * + * @param group + * @return + */ + boolean isManaged(GroupInterface group); + + /** + * Return a GroupDetail for the specified job. 
+ * + * @param job + * @return + */ + GroupDetail getGroupDetail(JobInterface job); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java index 4cae1c61d..907ca6ab1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -25,22 +21,19 @@ public interface HistoricalDao { - /** - * Return all jobs that have been finished longer than - * the specified cut off in hours. - * - * @param cutoffHours - * @return - */ - List getFinishedJobs(int cutoffHours); - - - /** - * Transfer a job from the live tables to the historical tables. - * - * @param job - */ - void transferJob(JobInterface job); + /** + * Return all jobs that have been finished longer than the specified cut off in hours. + * + * @param cutoffHours + * @return + */ + List getFinishedJobs(int cutoffHours); + + /** + * Transfer a job from the live tables to the historical tables. + * + * @param job + */ + void transferJob(JobInterface job); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java index 94ba316b1..9af9bd8ca 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.sql.Timestamp; @@ -34,305 +30,293 @@ import com.imageworks.spcue.grpc.report.HostReport; import com.imageworks.spcue.grpc.report.RenderHost; - /** * HostDao contains all SQL queries pretaining to host records. */ public interface HostDao { - /** - * Attempt to obtain an exclusive lock on the host. If another thread alrady - * has the host locked, a ResourceReservationFailureException is thrown. - * - * @param host HostInterface - * @throws ResourceReservationFailureException when an exclusive lock cannot - * be made. - */ - void lockForUpdate(HostInterface host); - - /** - * returns true if the specified host id is locked - * - * @param host HostInterface - * @return Boolean - */ - boolean isHostLocked(HostInterface host); - - /** - * deletes the passed host - * - * @param host HostInterface object to delete - */ - void deleteHost(HostInterface host); - - /** - * deletes the down state hosts - */ - void deleteDownHosts(); - - /** - * updates a host with the passed hardware state - * - * @param host HostInterface - * @param state HardwareState - */ - void updateHostState(HostInterface host, HardwareState state); - - /** - * updates a host with the passed free temporary directory - * - * @param host - * @param freeTempDir - */ - void updateHostFreeTempDir(HostInterface host, Long freeTempDir); - - /** - * returns a full host detail - * - * @param host HostInterface - * @return HostDetail - */ - HostEntity getHostDetail(HostInterface host); - - /** - * returns full host detail - * - * @param id String - * @return HostEntity - */ - HostEntity getHostDetail(String id); - - /** - * returns full host detail - * - * @param name String - * @return HostEntity - */ - HostEntity findHostDetail(String name); - - /** - * Return a DispatchHost object from its unique host name - * - * @param fqdn String - * @return DispatchHost - */ - DispatchHost findDispatchHost(String fqdn); - - /** - * Return a dispatch host object by id - * - * @param id String - * @return DispatchHost - */ - DispatchHost getDispatchHost(String id); - - /** - * Returns a host object by name - * - * @param name String - * @return HostInterface - */ - HostInterface findHost(String name); - - /** - * Returns a host object by ID. - * - * @param id String - * @return HostInterface - */ - HostInterface getHost(String id); - - /** - * Return the host involved with the given LocalJobAssignment. - * - * @param l LocalHostAssignment - * @return HostInterface - */ - HostInterface getHost(LocalHostAssignment l); - - /** - * Inserts a render host and its supporting procs into an allocation. - * - * @param report RenderHost - * @param a AllocationInterface - * @param useLongNames boolean - */ - void insertRenderHost(RenderHost report, AllocationInterface a, boolean useLongNames); - - /** - * Checks to see if a render host exists by name and returns true if it - * does, false if it doesn't. - * - * @param hostname String - * @return boolean - */ - boolean hostExists(String hostname); - - /** - * Updates the host's lock state. Open, Locked, NimbyLocked. Records the - * source of the lock. 
- * - * @param host HostInterface - * @param state LockState - * @param source Source - */ - void updateHostLock(HostInterface host, LockState state, Source source); - - /** - * Sets the reboot when idle boolean to true or false. If true the cue will - * issue the reboot command to hosts that ping in idle then set the flag - * back to false. - * - * @param host HostInterface - * @param enabled boolean - */ - void updateHostRebootWhenIdle(HostInterface host, boolean enabled); - - /** - * Updates a host's allocation - * - * @param host HostInterface - * @param alloc AllocationInterface - */ - void updateHostSetAllocation(HostInterface host, AllocationInterface alloc); - - /** - * - * @param id String - * @param tag String - * @param type HostTagType - */ - void tagHost(String id, String tag, HostTagType type); - - /** - * - * @param host HostInterface - * @param tag String - * @param type HostTagType - */ - void tagHost(HostInterface host, String tag, HostTagType type); - - /** - * - * @param host HostInterface - * @param type HostTagType - */ - void removeTagsByType(HostInterface host, HostTagType type); - - /** - * removes a tag - * - * @param host HostInterface - * @param tag String - */ - void removeTag(HostInterface host, String tag); - - /** - * renames a tag from oldTag to newTag - * - * @param host HostInterface - * @param oldTag String - * @param newTag String - */ - void renameTag(HostInterface host, String oldTag, String newTag); - - /** - * You must run this AFTER you've changed any type of job tags. The reason - * this is not a trigger or something of that nature is because is an - * intense process. - * - * @param id String - */ - void recalcuateTags(final String id); - - /** - * - * @param host HostInterface - * @param mode ThreadMode - */ - void updateThreadMode(HostInterface host, ThreadMode mode); - - /** - * Update the specified host's hardware information. - * - * @param host HostInterface - * @param totalMemory long - * @param freeMemory long - * @param totalSwap long - * @param freeSwap long - * @param totalMcp long - * @param freeMcp long - * @param totalGpuMemory long - * @param freeGpuMemory long - * @param load int - * @param os String - */ - void updateHostStats(HostInterface host, - long totalMemory, long freeMemory, - long totalSwap, long freeSwap, - long totalMcp, long freeMcp, - long totalGpuMemory, long freeGpuMemory, - int load, Timestamp bootTime, String os); - - /** - * Return true if the HardwareState is Up, false if it is anything else. - * - * @param host HostInterface - * @return boolean - */ - boolean isHostUp(HostInterface host); - - /** - * Return the number of whole stranded cores on this host. The must have - * less than Dispacher.MEM_STRANDED_THRESHHOLD for the cores to be - * considered stranded. - * - * @param h HostInterface - * @return int - */ - int getStrandedCoreUnits(HostInterface h); - - /** - * Return the number of whole stranded gpus on this host. The must have - * less than Dispacher.MEM_STRANDED_THRESHHOLD for the gpus to be - * considered stranded. - * - * @param h HostInterface - * @return int - */ - int getStrandedGpus(HostInterface h); - - /** - * Return true if the host is preferring a particular show. - * - * @param h HostInterface - * @return boolean - */ - boolean isPreferShow(HostInterface h); - - /** - * Return true if the host is a NIMBY host. - * - * @param h HostInterface - * @return boolean - */ - boolean isNimbyHost(HostInterface h); - - /** - * Update the host's operating system setting. 
- * - * @param host HostInterface - * @param os String - */ - void updateHostOs(HostInterface host, String os); - - /** - * Update a host's resource pool using the latest host report. - * - * @param host HostInterface - * @param report HostReport - */ - void updateHostResources(HostInterface host, HostReport report); + /** + * Attempt to obtain an exclusive lock on the host. If another thread already has the host locked, + * a ResourceReservationFailureException is thrown. + * + * @param host HostInterface + * @throws ResourceReservationFailureException when an exclusive lock cannot be made. + */ + void lockForUpdate(HostInterface host); + + /** + * returns true if the specified host id is locked + * + * @param host HostInterface + * @return Boolean + */ + boolean isHostLocked(HostInterface host); + + /** + * deletes the passed host + * + * @param host HostInterface object to delete + */ + void deleteHost(HostInterface host); + + /** + * deletes the down state hosts + */ + void deleteDownHosts(); + + /** + * updates a host with the passed hardware state + * + * @param host HostInterface + * @param state HardwareState + */ + void updateHostState(HostInterface host, HardwareState state); + + /** + * updates a host with the passed free temporary directory + * + * @param host + * @param freeTempDir + */ + void updateHostFreeTempDir(HostInterface host, Long freeTempDir); + + /** + * returns a full host detail + * + * @param host HostInterface + * @return HostDetail + */ + HostEntity getHostDetail(HostInterface host); + + /** + * returns full host detail + * + * @param id String + * @return HostEntity + */ + HostEntity getHostDetail(String id); + + /** + * returns full host detail + * + * @param name String + * @return HostEntity + */ + HostEntity findHostDetail(String name); + + /** + * Return a DispatchHost object from its unique host name + * + * @param fqdn String + * @return DispatchHost + */ + DispatchHost findDispatchHost(String fqdn); + + /** + * Return a dispatch host object by id + * + * @param id String + * @return DispatchHost + */ + DispatchHost getDispatchHost(String id); + + /** + * Returns a host object by name + * + * @param name String + * @return HostInterface + */ + HostInterface findHost(String name); + + /** + * Returns a host object by ID. + * + * @param id String + * @return HostInterface + */ + HostInterface getHost(String id); + + /** + * Return the host involved with the given LocalJobAssignment. + * + * @param l LocalHostAssignment + * @return HostInterface + */ + HostInterface getHost(LocalHostAssignment l); + + /** + * Inserts a render host and its supporting procs into an allocation. + * + * @param report RenderHost + * @param a AllocationInterface + * @param useLongNames boolean + */ + void insertRenderHost(RenderHost report, AllocationInterface a, boolean useLongNames); + + /** + * Checks to see if a render host exists by name and returns true if it does, false if it doesn't. + * + * @param hostname String + * @return boolean + */ + boolean hostExists(String hostname); + + /** + * Updates the host's lock state. Open, Locked, NimbyLocked. Records the source of the lock. + * + * @param host HostInterface + * @param state LockState + * @param source Source + */ + void updateHostLock(HostInterface host, LockState state, Source source); + + /** + * Sets the reboot when idle boolean to true or false. If true the cue will issue the reboot + * command to hosts that ping in idle then set the flag back to false.
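+ *
+ * <p>A minimal usage sketch; the {@code hostDao} reference and the host name are illustrative
+ * assumptions:
+ *
+ * <pre>{@code
+ * HostInterface host = hostDao.findHost("render01");
+ * hostDao.updateHostRebootWhenIdle(host, true); // reboot the next time it pings in idle
+ * }</pre>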
+ * + * @param host HostInterface + * @param enabled boolean + */ + void updateHostRebootWhenIdle(HostInterface host, boolean enabled); + + /** + * Updates a host's allocation + * + * @param host HostInterface + * @param alloc AllocationInterface + */ + void updateHostSetAllocation(HostInterface host, AllocationInterface alloc); + + /** + * + * @param id String + * @param tag String + * @param type HostTagType + */ + void tagHost(String id, String tag, HostTagType type); + + /** + * + * @param host HostInterface + * @param tag String + * @param type HostTagType + */ + void tagHost(HostInterface host, String tag, HostTagType type); + + /** + * + * @param host HostInterface + * @param type HostTagType + */ + void removeTagsByType(HostInterface host, HostTagType type); + + /** + * removes a tag + * + * @param host HostInterface + * @param tag String + */ + void removeTag(HostInterface host, String tag); + + /** + * renames a tag from oldTag to newTag + * + * @param host HostInterface + * @param oldTag String + * @param newTag String + */ + void renameTag(HostInterface host, String oldTag, String newTag); + + /** + * You must run this AFTER you've changed any type of job tags. The reason this is not a trigger + * or something of that nature is because is an intense process. + * + * @param id String + */ + void recalcuateTags(final String id); + + /** + * + * @param host HostInterface + * @param mode ThreadMode + */ + void updateThreadMode(HostInterface host, ThreadMode mode); + + /** + * Update the specified host's hardware information. + * + * @param host HostInterface + * @param totalMemory long + * @param freeMemory long + * @param totalSwap long + * @param freeSwap long + * @param totalMcp long + * @param freeMcp long + * @param totalGpuMemory long + * @param freeGpuMemory long + * @param load int + * @param os String + */ + void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, + long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, int load, + Timestamp bootTime, String os); + + /** + * Return true if the HardwareState is Up, false if it is anything else. + * + * @param host HostInterface + * @return boolean + */ + boolean isHostUp(HostInterface host); + + /** + * Return the number of whole stranded cores on this host. The must have less than + * Dispacher.MEM_STRANDED_THRESHHOLD for the cores to be considered stranded. + * + * @param h HostInterface + * @return int + */ + int getStrandedCoreUnits(HostInterface h); + + /** + * Return the number of whole stranded gpus on this host. The must have less than + * Dispacher.MEM_STRANDED_THRESHHOLD for the gpus to be considered stranded. + * + * @param h HostInterface + * @return int + */ + int getStrandedGpus(HostInterface h); + + /** + * Return true if the host is preferring a particular show. + * + * @param h HostInterface + * @return boolean + */ + boolean isPreferShow(HostInterface h); + + /** + * Return true if the host is a NIMBY host. + * + * @param h HostInterface + * @return boolean + */ + boolean isNimbyHost(HostInterface h); + + /** + * Update the host's operating system setting. + * + * @param host HostInterface + * @param os String + */ + void updateHostOs(HostInterface host, String os); + + /** + * Update a host's resource pool using the latest host report. 
+ * + * @param host HostInterface + * @param report HostReport + */ + void updateHostResources(HostInterface host, HostReport report); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java index 6597c4b83..3fbfd2651 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -41,462 +37,444 @@ public interface JobDao { - /** - * Updates all jobs in the speficed group to the - * max cores value. - * - * @param g - * @param cores - */ - public void updateMaxCores(GroupInterface g, int cores); - - /** - * Updates all jobs in the specifid group to the - * min cores value. - * - * @param g - * @param cores - */ - public void updateMinCores(GroupInterface g, int cores); - - /** - * Updates all jobs in the speficed group to the - * max gpu value. - * - * @param g - * @param gpu - */ - public void updateMaxGpus(GroupInterface g, int gpus); - - /** - * Updates all jobs in the specifid group to the - * min gpu value. - * - * @param g - * @param gpu - */ - public void updateMinGpus(GroupInterface g, int gpus); - - /** - * Updates all jobs in the specified group to the - * set priority. - * - * @param g - * @param priority - */ - public void updatePriority(GroupInterface g, int priority); - - /** - * Updates a jobs parent group to specified group - * - * @param job - * @param group - */ - void updateParent(JobInterface job, GroupDetail group, Inherit[] inherit); - - /** - * Returns an execution summary for the specified job. - * - * @param job - * @return - */ - ExecutionSummary getExecutionSummary(JobInterface job); - - /** - * returns a FrameStateTotals object with all of the - * job's frame state totals. - * - * @param job - * @return - */ - FrameStateTotals getFrameStateTotals(JobInterface job); - - /** - * Returns a DispatchJob from its unique id - * - * @param uuid - * @return - */ - DispatchJob getDispatchJob(String uuid); - - /** - * Returns true if the job has no more frames that - * can possibly be dispatched. - * - * @param job - * @return - */ - boolean isJobComplete(JobInterface job); - - /** - * Inserts a JobDetail. The job will not be pending until its - * activated. 
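For reviewers skimming the reformatted HostDao declarations above, the short sketch below shows how a caller might combine a few of them. It is illustrative only and not part of the patch: the HostStateSketch class name, the constructor-injected hostDao field, and the package of the HardwareState enum are assumptions; every DAO call mirrors a signature declared in the interface.

import com.imageworks.spcue.HostInterface;
import com.imageworks.spcue.dao.HostDao;
import com.imageworks.spcue.grpc.host.HardwareState; // package assumed for the generated enum

public class HostStateSketch {

  private final HostDao hostDao; // assumed to be provided by the Spring context

  public HostStateSketch(HostDao hostDao) {
    this.hostDao = hostDao;
  }

  /** Moves a named host into the given hardware state if it is known, up, and not locked. */
  public boolean transition(String hostname, HardwareState newState) {
    if (!hostDao.hostExists(hostname)) {
      return false;
    }
    HostInterface host = hostDao.findHost(hostname);
    if (!hostDao.isHostUp(host) || hostDao.isHostLocked(host)) {
      return false;
    }
    hostDao.lockForUpdate(host); // throws if another thread already holds the lock
    hostDao.updateHostState(host, newState);
    return true;
  }
}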
- * - * @param j - */ - void insertJob(JobDetail j, JobLogUtil jobLogUtil); - - /** - * Finds a Job from its name. This method returns only - * the current running job. - * - * @param name - * @return - */ - JobInterface findJob(String name); - - /** - * Finds a JobDetail from its name. This method returns only - * the current running job. - * - * @param name - * @return - */ - JobDetail findJobDetail(String name); - - /** - * Gets a JobDetail from its unique ID - * - * @param id - * @return - */ - JobDetail getJobDetail(String id); - - /** - * Returns a job by its ID - * - * @param id - * @return - */ - JobInterface getJob(String id); - - /** - * Returns a list of jobs assigned to a specific task. - * - * @param idl - * @return - */ - List getJobs(TaskEntity t); - - /** - * Finds all the jobs in a show. - * - * @param show - * @return - */ - List findJobs(ShowInterface show); - - /** - * - * @param group - * @return - */ - List findJobs(GroupInterface group); - - /** - * Returns true if an active job with the specified name exists - * - * @param name - * @return - */ - boolean exists(String name); - - /** - * Deletes specified job from DB - * - * @param job - */ - void deleteJob(JobInterface job); - - /** - * Activate job in lauching state. - * - * @param job - */ - void activateJob(JobInterface job, JobState jobState); - - /** - * updates the state of a job with new job state - * - * @param job - * @param state - */ - void updateState(JobInterface job, JobState state); - - /** - * updates a job to the finished state. returns true - * if the job was updated - * - * @param job - */ - boolean updateJobFinished(JobInterface job); - - /** - * reteurns true if job is over its minimum proc - * - * @param job - * @return boolean - */ - boolean isOverMinCores(JobInterface job); - - /** - * returns true if a job has pending frames. - * - * @param job - * @return - */ - boolean hasPendingFrames(JobInterface job); - - /** - * returns true if job is over max procs - * - * @param job - * @return - */ - boolean isOverMaxCores(JobInterface job); - - /** - * returns true if job is at its max proc - * - * @param job - * @return - */ - boolean isAtMaxCores(JobInterface job); - - /** - * Return true if adding given core units to the job - * will set the job over its max core value. - * - * @param job - * @param coreUnits - * @return - */ - boolean isOverMaxCores(JobInterface job, int coreUnits); - - /** - * returns true if job is over max gpus - * - * @param job - * @return - */ - boolean isOverMaxGpus(JobInterface job); - - /** - * returns true if job is at its max gpus - * - * @param job - * @return - */ - boolean isAtMaxGpus(JobInterface job); - - /** - * Return true if adding given gpus to the job - * will set the job over its max gpus value. 
- * - * @param job - * @param gpus - * @return - */ - boolean isOverMaxGpus(JobInterface job, int gpus); - - /** - * sets the jobs new priority value - * - * @param j - * @param v - */ - void updatePriority(JobInterface j, int v); - - /** - * sets the jobs new min proc value - * - * @param j - * @param v - */ - void updateMinCores(JobInterface j, int v); - - /** - * sets the jobs new max proc value - * - * @param j - * @param v - */ - void updateMaxCores(JobInterface j, int v); - - /** - * sets the jobs new min gpu value - * - * @param j - * @param v - */ - void updateMinGpus(JobInterface j, int v); - - /** - * sets the jobs new max gpu value - * - * @param j - * @param v - */ - void updateMaxGpus(JobInterface j, int v); - - /** - * Update a job's paused state - * - * @param j - * @param b - */ - void updatePaused(JobInterface j, boolean b); - - /** - * Update a jobs auto-eat state - * - * @param j - * @param b - */ - void updateAutoEat(JobInterface j, boolean b); - - /** - * Updates the int_max_retries column with the value of - * max_retries. Checks to make sure max_retries - * is greater than 0 and less than or equal to - * MAX_FRAME_RETRIES - * - * @param Job - * @param max_retries - */ - void updateMaxFrameRetries(JobInterface j, int max_retries); - - /** - * Inserts a map into the job's env table - * - * - * @param job - * @param env - */ - void insertEnvironment(JobInterface job, Map env); - - /** - * Update jobs max RSS. Only updates if the passed in value - * is greater than the current value of int_max_rss - * - * @param job - * @param env - */ - void updateMaxRSS(JobInterface job, long maxRss); - - /** - * Inserts a key/value pair into the jobs env table - * - * @param job - * @param key - * @param value - */ - void insertEnvironment(JobInterface job, String key, String value); - - /** - * Grabs the job environment - * - * @param job - * @return - */ - Map getEnvironment(JobInterface job); - - /** - * Updates the job's log path in the DB. This doesn't touch the file - * system. - * - * @param job - * @param path - */ - public void updateLogPath(JobInterface job, String path); - - /** - * - * @param name - * @return - */ - public JobDetail findLastJob(String name); - - /** - * Returns true of the cue has some pending jobs - * - * @return - */ - public boolean cueHasPendingJobs(FacilityInterface f); - - /** - * Enables/disables autobooking for specified job. - * - * @param value - */ - public void enableAutoBooking(JobInterface job, boolean value); - - /** - * Enables/disables auto unbooking for specified job. - * - * @param job - * @param value - */ - void enableAutoUnBooking(JobInterface job, boolean value); - - /** - * Maps the post job to the specified job - * - * @param job - */ - void mapPostJob(BuildableJob job); - - /** - * Activates the specified job's post job - * - * @param job - */ - void activatePostJob(JobInterface job); - - /** - * Update all jobs in the specified group to the - * specified department. - * - * @param group - */ - void updateDepartment(GroupInterface group, DepartmentInterface dept); - - /** - * Update the specified job to the specified department. - * - * @param group - */ - void updateDepartment(JobInterface job, DepartmentInterface dept); - - /** - * Set the job's new parent. The job will automatically - * inherit all relevant settings from the group. - * - * @param job - * @param dest - */ - void updateParent(JobInterface job, GroupDetail dest); - - /** - * Update layer usage with processor time usage. 
- * This happens when the proc has completed or failed some work. - * - * @param proc - * @param newState - */ - void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus); - - /** - * Returns true if the job is launching - * - * @param j - * @return - */ - boolean isLaunching(JobInterface j); - - void updateEmail(JobInterface job, String email); - - String getEmail(JobInterface job); + /** + * Updates all jobs in the speficed group to the max cores value. + * + * @param g + * @param cores + */ + public void updateMaxCores(GroupInterface g, int cores); + + /** + * Updates all jobs in the specifid group to the min cores value. + * + * @param g + * @param cores + */ + public void updateMinCores(GroupInterface g, int cores); + + /** + * Updates all jobs in the speficed group to the max gpu value. + * + * @param g + * @param gpu + */ + public void updateMaxGpus(GroupInterface g, int gpus); + + /** + * Updates all jobs in the specifid group to the min gpu value. + * + * @param g + * @param gpu + */ + public void updateMinGpus(GroupInterface g, int gpus); + + /** + * Updates all jobs in the specified group to the set priority. + * + * @param g + * @param priority + */ + public void updatePriority(GroupInterface g, int priority); + + /** + * Updates a jobs parent group to specified group + * + * @param job + * @param group + */ + void updateParent(JobInterface job, GroupDetail group, Inherit[] inherit); + + /** + * Returns an execution summary for the specified job. + * + * @param job + * @return + */ + ExecutionSummary getExecutionSummary(JobInterface job); + + /** + * returns a FrameStateTotals object with all of the job's frame state totals. + * + * @param job + * @return + */ + FrameStateTotals getFrameStateTotals(JobInterface job); + + /** + * Returns a DispatchJob from its unique id + * + * @param uuid + * @return + */ + DispatchJob getDispatchJob(String uuid); + + /** + * Returns true if the job has no more frames that can possibly be dispatched. + * + * @param job + * @return + */ + boolean isJobComplete(JobInterface job); + + /** + * Inserts a JobDetail. The job will not be pending until its activated. + * + * @param j + */ + void insertJob(JobDetail j, JobLogUtil jobLogUtil); + + /** + * Finds a Job from its name. This method returns only the current running job. + * + * @param name + * @return + */ + JobInterface findJob(String name); + + /** + * Finds a JobDetail from its name. This method returns only the current running job. + * + * @param name + * @return + */ + JobDetail findJobDetail(String name); + + /** + * Gets a JobDetail from its unique ID + * + * @param id + * @return + */ + JobDetail getJobDetail(String id); + + /** + * Returns a job by its ID + * + * @param id + * @return + */ + JobInterface getJob(String id); + + /** + * Returns a list of jobs assigned to a specific task. + * + * @param idl + * @return + */ + List getJobs(TaskEntity t); + + /** + * Finds all the jobs in a show. + * + * @param show + * @return + */ + List findJobs(ShowInterface show); + + /** + * + * @param group + * @return + */ + List findJobs(GroupInterface group); + + /** + * Returns true if an active job with the specified name exists + * + * @param name + * @return + */ + boolean exists(String name); + + /** + * Deletes specified job from DB + * + * @param job + */ + void deleteJob(JobInterface job); + + /** + * Activate job in lauching state. 
+ * + * @param job + */ + void activateJob(JobInterface job, JobState jobState); + + /** + * updates the state of a job with new job state + * + * @param job + * @param state + */ + void updateState(JobInterface job, JobState state); + + /** + * updates a job to the finished state. returns true if the job was updated + * + * @param job + */ + boolean updateJobFinished(JobInterface job); + + /** + * reteurns true if job is over its minimum proc + * + * @param job + * @return boolean + */ + boolean isOverMinCores(JobInterface job); + + /** + * returns true if a job has pending frames. + * + * @param job + * @return + */ + boolean hasPendingFrames(JobInterface job); + + /** + * returns true if job is over max procs + * + * @param job + * @return + */ + boolean isOverMaxCores(JobInterface job); + + /** + * returns true if job is at its max proc + * + * @param job + * @return + */ + boolean isAtMaxCores(JobInterface job); + + /** + * Return true if adding given core units to the job will set the job over its max core value. + * + * @param job + * @param coreUnits + * @return + */ + boolean isOverMaxCores(JobInterface job, int coreUnits); + + /** + * returns true if job is over max gpus + * + * @param job + * @return + */ + boolean isOverMaxGpus(JobInterface job); + + /** + * returns true if job is at its max gpus + * + * @param job + * @return + */ + boolean isAtMaxGpus(JobInterface job); + + /** + * Return true if adding given gpus to the job will set the job over its max gpus value. + * + * @param job + * @param gpus + * @return + */ + boolean isOverMaxGpus(JobInterface job, int gpus); + + /** + * sets the jobs new priority value + * + * @param j + * @param v + */ + void updatePriority(JobInterface j, int v); + + /** + * sets the jobs new min proc value + * + * @param j + * @param v + */ + void updateMinCores(JobInterface j, int v); + + /** + * sets the jobs new max proc value + * + * @param j + * @param v + */ + void updateMaxCores(JobInterface j, int v); + + /** + * sets the jobs new min gpu value + * + * @param j + * @param v + */ + void updateMinGpus(JobInterface j, int v); + + /** + * sets the jobs new max gpu value + * + * @param j + * @param v + */ + void updateMaxGpus(JobInterface j, int v); + + /** + * Update a job's paused state + * + * @param j + * @param b + */ + void updatePaused(JobInterface j, boolean b); + + /** + * Update a jobs auto-eat state + * + * @param j + * @param b + */ + void updateAutoEat(JobInterface j, boolean b); + + /** + * Updates the int_max_retries column with the value of max_retries. Checks to make sure + * max_retries is greater than 0 and less than or equal to MAX_FRAME_RETRIES + * + * @param Job + * @param max_retries + */ + void updateMaxFrameRetries(JobInterface j, int max_retries); + + /** + * Inserts a map into the job's env table + * + * + * @param job + * @param env + */ + void insertEnvironment(JobInterface job, Map env); + + /** + * Update jobs max RSS. Only updates if the passed in value is greater than the current value of + * int_max_rss + * + * @param job + * @param env + */ + void updateMaxRSS(JobInterface job, long maxRss); + + /** + * Inserts a key/value pair into the jobs env table + * + * @param job + * @param key + * @param value + */ + void insertEnvironment(JobInterface job, String key, String value); + + /** + * Grabs the job environment + * + * @param job + * @return + */ + Map getEnvironment(JobInterface job); + + /** + * Updates the job's log path in the DB. This doesn't touch the file system. 
+ * + * @param job + * @param path + */ + public void updateLogPath(JobInterface job, String path); + + /** + * + * @param name + * @return + */ + public JobDetail findLastJob(String name); + + /** + * Returns true of the cue has some pending jobs + * + * @return + */ + public boolean cueHasPendingJobs(FacilityInterface f); + + /** + * Enables/disables autobooking for specified job. + * + * @param value + */ + public void enableAutoBooking(JobInterface job, boolean value); + + /** + * Enables/disables auto unbooking for specified job. + * + * @param job + * @param value + */ + void enableAutoUnBooking(JobInterface job, boolean value); + + /** + * Maps the post job to the specified job + * + * @param job + */ + void mapPostJob(BuildableJob job); + + /** + * Activates the specified job's post job + * + * @param job + */ + void activatePostJob(JobInterface job); + + /** + * Update all jobs in the specified group to the specified department. + * + * @param group + */ + void updateDepartment(GroupInterface group, DepartmentInterface dept); + + /** + * Update the specified job to the specified department. + * + * @param group + */ + void updateDepartment(JobInterface job, DepartmentInterface dept); + + /** + * Set the job's new parent. The job will automatically inherit all relevant settings from the + * group. + * + * @param job + * @param dest + */ + void updateParent(JobInterface job, GroupDetail dest); + + /** + * Update layer usage with processor time usage. This happens when the proc has completed or + * failed some work. + * + * @param proc + * @param newState + */ + void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus); + + /** + * Returns true if the job is launching + * + * @param j + * @return + */ + boolean isLaunching(JobInterface j); + + void updateEmail(JobInterface job, String email); + + String getEmail(JobInterface job); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java index c4b07edf9..06d83737f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
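As a reading aid for the reformatted JobDao above, here is a hedged usage sketch. It is not part of the patch; the JobTriageSketch class, the injected jobDao field, and the pauseAndDeprioritize method name are invented for illustration, while the DAO calls themselves mirror the signatures declared in the interface.

import com.imageworks.spcue.FrameStateTotals;
import com.imageworks.spcue.JobInterface;
import com.imageworks.spcue.dao.JobDao;

public class JobTriageSketch {

  private final JobDao jobDao; // assumed to be provided by the Spring context

  public JobTriageSketch(JobDao jobDao) {
    this.jobDao = jobDao;
  }

  /** Pauses a running job by name, lowers its priority, and returns its frame totals. */
  public FrameStateTotals pauseAndDeprioritize(String jobName, int newPriority) {
    JobInterface job = jobDao.findJob(jobName); // matches only the currently running job
    if (jobDao.hasPendingFrames(job)) {
      jobDao.updatePaused(job, true);
      jobDao.updatePriority(job, newPriority);
    }
    return jobDao.getFrameStateTotals(job);
  }
}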
*/ - - package com.imageworks.spcue.dao; import java.util.List; @@ -35,437 +31,417 @@ public interface LayerDao { - /** - * - * @param layer - * @return - */ - public ExecutionSummary getExecutionSummary(LayerInterface layer); - - /** - * return the frame state totals for the specified layer - * - * @param layer - * @return - */ - public FrameStateTotals getFrameStateTotals(LayerInterface layer); - - /** - * returns a list of layers by job - * - * @param job - * @return - */ - public List getLayerDetails(JobInterface job); - - /** - * Returns true if supplied layer is complete. - * - * @param layer - * @return boolean - */ - boolean isLayerComplete(LayerInterface layer); - - /** - * Returns true if supplied layer is dispatchable. - * - * @param l - * @return boolean - */ - boolean isLayerDispatchable(LayerInterface l); - - /** - * Inserts a LayerDetail - * - * @param l - */ - void insertLayerDetail(LayerDetail l); - - /** - * gets a layer detail from an object that implements layer - * - * @param layer - * @return LayerDetail - */ - LayerDetail getLayerDetail(LayerInterface layer); - - /** - * get layer detail from the the unique id - * - * @param id - * @return - */ - LayerDetail getLayerDetail(String id); - - /** - * get a layer detail from the job and layer name - * - * @param job - * @param name - * @return - */ - LayerDetail findLayerDetail(JobInterface job, String name); - - /** - * Get a minimal layer from the layer id - * - * @param id - * @return - */ - LayerInterface getLayer(String id); - - /** - * Find a minimal layer from the job and layer name - * - * @param job - * @param name - * @return - */ - LayerInterface findLayer(JobInterface job, String name); - - /** - * update the number of min cores the layer requires - * - * @param layer - * @param val - */ - void updateLayerMinCores(LayerInterface layer, int val); - - - /** - * update the number of gpus the layer requires - * - * @param layer - * @param val - */ - void updateLayerMinGpus(LayerInterface layer, int val); - - /** - * update the amount of memory required by all subsequent - * running frames in the specified layer. - * - * @param layer - * @param val - */ - void updateLayerMinMemory(LayerInterface layer, long kb); - - /** - * update the amount of gpu memory in kb required by all subsequent - * running frames in the specified layer. - * - * @param layer - * @param val - */ - void updateLayerMinGpuMemory(LayerInterface layer, long val); - - /** - * Update a layer with new host tags. - * - * @param layer - * @param val - */ - void updateLayerTags(LayerInterface layer, Set tags); - - /** - * Insert a key/value pair into the layer environment - * - * @param layer - * @param key - * @param value - */ - void insertLayerEnvironment(LayerInterface layer, String key, String value); - - /** - * Insert a map key/value pairs into the layer environment - * - * @param layer - * @param env - */ - void insertLayerEnvironment(LayerInterface layer, Map env); - - /** - * Get the layer environment map - * - * @param layer - * @return - */ - Map getLayerEnvironment(LayerInterface layer); - - /** - * Updated the layers MaxRSS value. If force is true then the - * value is updated no matter what the current value is. If force - * is false, the value is only updated the val is greater than than - * the existing value. 
- * - * @param layer - * @param val - */ - void updateLayerMaxRSS(LayerInterface layer, long val, boolean force); - - /** - * Increases the value of the minimum memory when the supplied - * value is larger than the current value - * - * @param layer - * @param val - */ - void increaseLayerMinMemory(LayerInterface layer, long val); - - /** - * Increases the value of the minimum gpu when the supplied - * value is larger than the current value - * - * @param layer - * @param val - */ - void increaseLayerMinGpuMemory(LayerInterface layer, long val); - - /** - * Tries to find a max RSS value for layer in the specified job. The - * layer must have at least 25% of its pending frames completed - * for this to return a valid result. If the layer cannot be - * found then 0 is returned. - * - * @param job - * @param name - * @return - */ - long findPastMaxRSS(JobInterface job, String name); - - /** - * Returns a list of layers from the specified job. - * - * @param job - * @return - */ - public List getLayers(JobInterface job); - - /** - * Update all layers of the set type in specified job - * with the new tags. - * - * @param job - * @param tags - * @param type - */ - void updateTags(JobInterface job, String tags, LayerType type); - - /** - * Update all layers of the set type in the specified - * job with the new memory requirement. - * - * @param job - * @param mem - * @param type - */ - void updateMinMemory(JobInterface job, long mem, LayerType type); - - /** - * Update all layers of the set type in the specified - * job with the new gpu requirement. - * - * @param job - * @param mem - * @param type - */ - void updateMinGpuMemory(JobInterface job, long mem, LayerType type); - - /** - * Update all layers of the set type in the specified job - * with the new max cores requirement. - * - * @param job - * @param cores - * @param type - */ - void updateMaxCores(JobInterface job, int cores, LayerType type); - - /** - * Update all layers of the set type in the specified job - * with the new min cores requirement. - * - * @param job - * @param cores - * @param type - */ - void updateMinCores(JobInterface job, int cores, LayerType type); - - /** - * Update all layers of the set type in the specified job - * with the new min gpu requirement. - * - * @param job - * @param gpus - * @param type - */ - void updateMinGpus(JobInterface job, int gpus, LayerType type); - - /** - * Update a layer's max cores value, which limits how - * much threading can go on. - * - * @param layer - * @param threadable - */ - void updateThreadable(LayerInterface layer, boolean threadable); - - /** - * Update a layer's timeout value, which limits how - * much the frame can run on a host. - * - * @param layer - * @param timeout - */ - void updateTimeout(LayerInterface layer, int timeout); - - /** - * Update a layer's LLU timeout value, which limits how - * much the frame can run on a host without updates in the log file. - * - * @param layer - * @param timeout_llu - */ - void updateTimeoutLLU(LayerInterface layer, int timeout_llu); - - /** - * Lowers the minimum memory on a layer if the layer - * is using less memory and the currnet min memory is - * the dispatcher default. - * - * @param layer - * @param val - * @return - */ - boolean balanceLayerMinMemory(LayerInterface layer, long val); - - /** - * Appends a tag to the current set of tags. If the tag - * already exists then nothing happens. 
- * - * @param layer - * @param val - */ - void appendLayerTags(LayerInterface layer, String val); - - /** - * Returns true if the layer can be optimized to use - * util based on the specified criteria. - * - * @param l - * @param succeeded - * @param avg - * @return - */ - boolean isOptimizable(LayerInterface l, int succeeded, float avg); - - /** - * Update layer usage with processor time usage. - * This happens when the proc has completed or failed some work. - * - * @param layer - * @param newState - * @param exitStatus - */ - void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus); - - /** - * Returns true of the layer is launching. - * - * @param l - * @return - */ - boolean isLaunching(LayerInterface l); - - /** - * Return true if the application running in the given layer - * is threadable. - * - * @param l - * @return - */ - boolean isThreadable(LayerInterface l); - - /** - * Enable/disable memory optimizer. - * - * @param layer - * @param state - */ - void enableMemoryOptimizer(LayerInterface layer, boolean state); - - /** - * Return a list of outputs mapped to the given layer. - * - * @param layer - * @return - */ - List getLayerOutputs(LayerInterface layer); - - /** - * Add a list of filespecs to the given layer's output table. - * - * @param layer - * @param specs - */ - void insertLayerOutput(LayerInterface layer, String spec); - - /** - * Return the thread stats for the given layer. - * - * @param layer - * @return - */ - List getThreadStats(LayerInterface layer); - - /** - * Set the layer's max cores value to the given int. The - * max cores value will not allow the dispatcher to - * book over the given number of cores. - * - * @param layer - * @param val - */ - void updateLayerMaxCores(LayerInterface layer, int val); - - /** - * Set the layer's max gpus value to the given int. The - * max gpu value will not allow the dispatcher to - * book over the given number of gpu. - * - * @param layer - * @param val - */ - void updateLayerMaxGpus(LayerInterface layer, int val); - - /** - * Add a limit to the given layer. - * - * @param layer - * @param limit_id - */ - void addLimit(LayerInterface layer, String limitId); - - /** - * Remove a limit to the given layer. - * - * @param layer - * @param limit_id - */ - void dropLimit(LayerInterface layer, String limitId); - - /** - * Return a list of limits on the layer. - * - * @param layer - */ - List getLimits(LayerInterface layer); - - /** - * Return a list of limit names on the layer. - * - * @param layer - */ - List getLimitNames(LayerInterface layer); + /** + * + * @param layer + * @return + */ + public ExecutionSummary getExecutionSummary(LayerInterface layer); + + /** + * return the frame state totals for the specified layer + * + * @param layer + * @return + */ + public FrameStateTotals getFrameStateTotals(LayerInterface layer); + + /** + * returns a list of layers by job + * + * @param job + * @return + */ + public List getLayerDetails(JobInterface job); + + /** + * Returns true if supplied layer is complete. + * + * @param layer + * @return boolean + */ + boolean isLayerComplete(LayerInterface layer); + + /** + * Returns true if supplied layer is dispatchable. 
+ * + * @param l + * @return boolean + */ + boolean isLayerDispatchable(LayerInterface l); + + /** + * Inserts a LayerDetail + * + * @param l + */ + void insertLayerDetail(LayerDetail l); + + /** + * gets a layer detail from an object that implements layer + * + * @param layer + * @return LayerDetail + */ + LayerDetail getLayerDetail(LayerInterface layer); + + /** + * get layer detail from the the unique id + * + * @param id + * @return + */ + LayerDetail getLayerDetail(String id); + + /** + * get a layer detail from the job and layer name + * + * @param job + * @param name + * @return + */ + LayerDetail findLayerDetail(JobInterface job, String name); + + /** + * Get a minimal layer from the layer id + * + * @param id + * @return + */ + LayerInterface getLayer(String id); + + /** + * Find a minimal layer from the job and layer name + * + * @param job + * @param name + * @return + */ + LayerInterface findLayer(JobInterface job, String name); + + /** + * update the number of min cores the layer requires + * + * @param layer + * @param val + */ + void updateLayerMinCores(LayerInterface layer, int val); + + /** + * update the number of gpus the layer requires + * + * @param layer + * @param val + */ + void updateLayerMinGpus(LayerInterface layer, int val); + + /** + * update the amount of memory required by all subsequent running frames in the specified layer. + * + * @param layer + * @param val + */ + void updateLayerMinMemory(LayerInterface layer, long kb); + + /** + * update the amount of gpu memory in kb required by all subsequent running frames in the + * specified layer. + * + * @param layer + * @param val + */ + void updateLayerMinGpuMemory(LayerInterface layer, long val); + + /** + * Update a layer with new host tags. + * + * @param layer + * @param val + */ + void updateLayerTags(LayerInterface layer, Set tags); + + /** + * Insert a key/value pair into the layer environment + * + * @param layer + * @param key + * @param value + */ + void insertLayerEnvironment(LayerInterface layer, String key, String value); + + /** + * Insert a map key/value pairs into the layer environment + * + * @param layer + * @param env + */ + void insertLayerEnvironment(LayerInterface layer, Map env); + + /** + * Get the layer environment map + * + * @param layer + * @return + */ + Map getLayerEnvironment(LayerInterface layer); + + /** + * Updated the layers MaxRSS value. If force is true then the value is updated no matter what the + * current value is. If force is false, the value is only updated the val is greater than than the + * existing value. + * + * @param layer + * @param val + */ + void updateLayerMaxRSS(LayerInterface layer, long val, boolean force); + + /** + * Increases the value of the minimum memory when the supplied value is larger than the current + * value + * + * @param layer + * @param val + */ + void increaseLayerMinMemory(LayerInterface layer, long val); + + /** + * Increases the value of the minimum gpu when the supplied value is larger than the current value + * + * @param layer + * @param val + */ + void increaseLayerMinGpuMemory(LayerInterface layer, long val); + + /** + * Tries to find a max RSS value for layer in the specified job. The layer must have at least 25% + * of its pending frames completed for this to return a valid result. If the layer cannot be found + * then 0 is returned. + * + * @param job + * @param name + * @return + */ + long findPastMaxRSS(JobInterface job, String name); + + /** + * Returns a list of layers from the specified job. 
+ * + * @param job + * @return + */ + public List getLayers(JobInterface job); + + /** + * Update all layers of the set type in specified job with the new tags. + * + * @param job + * @param tags + * @param type + */ + void updateTags(JobInterface job, String tags, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new memory requirement. + * + * @param job + * @param mem + * @param type + */ + void updateMinMemory(JobInterface job, long mem, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new gpu requirement. + * + * @param job + * @param mem + * @param type + */ + void updateMinGpuMemory(JobInterface job, long mem, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new max cores requirement. + * + * @param job + * @param cores + * @param type + */ + void updateMaxCores(JobInterface job, int cores, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new min cores requirement. + * + * @param job + * @param cores + * @param type + */ + void updateMinCores(JobInterface job, int cores, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new min gpu requirement. + * + * @param job + * @param gpus + * @param type + */ + void updateMinGpus(JobInterface job, int gpus, LayerType type); + + /** + * Update a layer's max cores value, which limits how much threading can go on. + * + * @param layer + * @param threadable + */ + void updateThreadable(LayerInterface layer, boolean threadable); + + /** + * Update a layer's timeout value, which limits how much the frame can run on a host. + * + * @param layer + * @param timeout + */ + void updateTimeout(LayerInterface layer, int timeout); + + /** + * Update a layer's LLU timeout value, which limits how much the frame can run on a host without + * updates in the log file. + * + * @param layer + * @param timeout_llu + */ + void updateTimeoutLLU(LayerInterface layer, int timeout_llu); + + /** + * Lowers the minimum memory on a layer if the layer is using less memory and the currnet min + * memory is the dispatcher default. + * + * @param layer + * @param val + * @return + */ + boolean balanceLayerMinMemory(LayerInterface layer, long val); + + /** + * Appends a tag to the current set of tags. If the tag already exists then nothing happens. + * + * @param layer + * @param val + */ + void appendLayerTags(LayerInterface layer, String val); + + /** + * Returns true if the layer can be optimized to use util based on the specified criteria. + * + * @param l + * @param succeeded + * @param avg + * @return + */ + boolean isOptimizable(LayerInterface l, int succeeded, float avg); + + /** + * Update layer usage with processor time usage. This happens when the proc has completed or + * failed some work. + * + * @param layer + * @param newState + * @param exitStatus + */ + void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus); + + /** + * Returns true of the layer is launching. + * + * @param l + * @return + */ + boolean isLaunching(LayerInterface l); + + /** + * Return true if the application running in the given layer is threadable. + * + * @param l + * @return + */ + boolean isThreadable(LayerInterface l); + + /** + * Enable/disable memory optimizer. + * + * @param layer + * @param state + */ + void enableMemoryOptimizer(LayerInterface layer, boolean state); + + /** + * Return a list of outputs mapped to the given layer. 
+ * + * @param layer + * @return + */ + List getLayerOutputs(LayerInterface layer); + + /** + * Add a list of filespecs to the given layer's output table. + * + * @param layer + * @param specs + */ + void insertLayerOutput(LayerInterface layer, String spec); + + /** + * Return the thread stats for the given layer. + * + * @param layer + * @return + */ + List getThreadStats(LayerInterface layer); + + /** + * Set the layer's max cores value to the given int. The max cores value will not allow the + * dispatcher to book over the given number of cores. + * + * @param layer + * @param val + */ + void updateLayerMaxCores(LayerInterface layer, int val); + + /** + * Set the layer's max gpus value to the given int. The max gpu value will not allow the + * dispatcher to book over the given number of gpu. + * + * @param layer + * @param val + */ + void updateLayerMaxGpus(LayerInterface layer, int val); + + /** + * Add a limit to the given layer. + * + * @param layer + * @param limit_id + */ + void addLimit(LayerInterface layer, String limitId); + + /** + * Remove a limit to the given layer. + * + * @param layer + * @param limit_id + */ + void dropLimit(LayerInterface layer, String limitId); + + /** + * Return a list of limits on the layer. + * + * @param layer + */ + List getLimits(LayerInterface layer); + + /** + * Return a list of limit names on the layer. + * + * @param layer + */ + List getLimitNames(LayerInterface layer); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java index c74d1d11b..686c35831 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.LimitEntity; @@ -24,53 +20,53 @@ public interface LimitDao { - /** - * Insert and return a facility. - * - * @param limit - * @return - */ - public String createLimit(String name, int maxValue); + /** + * Insert and return a facility. + * + * @param limit + * @return + */ + public String createLimit(String name, int maxValue); - /** - * Deletes a limit record, if possible. - * - * @param limit - * @return - */ - public void deleteLimit(LimitInterface limit); + /** + * Deletes a limit record, if possible. 
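The reformatted LayerDao above is easier to review with a concrete caller in mind, so here is a small sketch under stated assumptions: the class and method names are hypothetical, the value returned by findPastMaxRSS is assumed to be in the same units expected by increaseLayerMinMemory, and only signatures shown in the interface are used.

import com.imageworks.spcue.JobInterface;
import com.imageworks.spcue.LayerInterface;
import com.imageworks.spcue.dao.LayerDao;

public class LayerMemorySketch {

  private final LayerDao layerDao; // assumed to be provided by the Spring context

  public LayerMemorySketch(LayerDao layerDao) {
    this.layerDao = layerDao;
  }

  /**
   * Seeds a layer's minimum memory from the max RSS observed for a layer of the same name in a
   * previous job, when such history exists.
   */
  public void seedMinMemory(JobInterface previousJob, JobInterface job, String layerName) {
    long pastMaxRss = layerDao.findPastMaxRSS(previousJob, layerName); // 0 when no usable history
    if (pastMaxRss > 0) {
      LayerInterface layer = layerDao.findLayer(job, layerName);
      layerDao.increaseLayerMinMemory(layer, pastMaxRss); // only raises, never lowers
    }
  }
}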
+ * + * @param limit + * @return + */ + public void deleteLimit(LimitInterface limit); - /** - * Find a limit by it's name - * - * @param name - * @return LimitEntity - */ - public LimitEntity findLimit(String name); + /** + * Find a limit by it's name + * + * @param name + * @return LimitEntity + */ + public LimitEntity findLimit(String name); - /** - * Gets a limit by Id - * - * @param id - * @return LimitEntity - */ - public LimitEntity getLimit(String id); + /** + * Gets a limit by Id + * + * @param id + * @return LimitEntity + */ + public LimitEntity getLimit(String id); - /** - * Set the specified limit's name. - * - * @param limit - * @param name - * @return - */ - public void setLimitName(LimitInterface limit, String name); + /** + * Set the specified limit's name. + * + * @param limit + * @param name + * @return + */ + public void setLimitName(LimitInterface limit, String name); - /** - * Set the specified limit's max value. - * - * @param limit - * @param value - * @return - */ - public void setMaxValue(LimitInterface limit, int value); + /** + * Set the specified limit's max value. + * + * @param limit + * @param value + * @return + */ + public void setMaxValue(LimitInterface limit, int value); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java index 29e2faa57..6359ea936 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.MaintenanceTask; @@ -28,38 +24,37 @@ */ public interface MaintenanceDao { - /** - * Set hosts to the down state that have not pinged in within 5 minutes and - * return the number hosts that failed the check. - * - * @return int - */ - int setUpHostsToDown(); - - /** - * Lock specified task - * - * @param task - * @return - */ - boolean lockTask(MaintenanceTask task); - - /** - * Locks a test for the specified number of minutes. No other thread will - * execute this task, even if the task is unlocked for N amount of time. 
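For the reformatted LimitDao above, a brief illustrative sketch follows. It assumes LimitEntity satisfies the LimitInterface parameter expected by setMaxValue, and the class and method names are hypothetical; the DAO calls are the ones declared in the interface.

import com.imageworks.spcue.LimitEntity;
import com.imageworks.spcue.dao.LimitDao;

public class LimitSetupSketch {

  private final LimitDao limitDao; // assumed to be provided by the Spring context

  public LimitSetupSketch(LimitDao limitDao) {
    this.limitDao = limitDao;
  }

  /** Creates a limit and then tightens its max value, exercising the create/get/update calls. */
  public LimitEntity createAndTighten(String name, int initialMax, int finalMax) {
    String id = limitDao.createLimit(name, initialMax);
    LimitEntity limit = limitDao.getLimit(id);
    limitDao.setMaxValue(limit, finalMax); // assumes LimitEntity implements LimitInterface
    return limit;
  }
}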
- * - * @param task - * @param minutes - * @return - */ - public boolean lockTask(MaintenanceTask task, int minutes); - - /** - * Unlock specified task - * - * @param task - */ - void unlockTask(MaintenanceTask task); + /** + * Set hosts to the down state that have not pinged in within 5 minutes and return the number + * hosts that failed the check. + * + * @return int + */ + int setUpHostsToDown(); + + /** + * Lock specified task + * + * @param task + * @return + */ + boolean lockTask(MaintenanceTask task); + + /** + * Locks a test for the specified number of minutes. No other thread will execute this task, even + * if the task is unlocked for N amount of time. + * + * @param task + * @param minutes + * @return + */ + public boolean lockTask(MaintenanceTask task, int minutes); + + /** + * Unlock specified task + * + * @param task + */ + void unlockTask(MaintenanceTask task); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java index ef1f9099f..0f5dd5928 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
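To make the locking contract of the reformatted MaintenanceDao above concrete, here is a hedged sketch of how a periodic sweep might use it. The HostCheckSketch class is hypothetical, and the MaintenanceTask constant is left to the caller so no specific enum value is assumed.

import com.imageworks.spcue.MaintenanceTask;
import com.imageworks.spcue.dao.MaintenanceDao;

public class HostCheckSketch {

  private final MaintenanceDao maintenanceDao; // assumed to be provided by the Spring context

  public HostCheckSketch(MaintenanceDao maintenanceDao) {
    this.maintenanceDao = maintenanceDao;
  }

  /** Runs the stale-host sweep at most once across competing cuebot nodes. */
  public int sweepStaleHosts(MaintenanceTask task) {
    if (!maintenanceDao.lockTask(task)) {
      return 0; // another node already holds this task
    }
    try {
      return maintenanceDao.setUpHostsToDown(); // hosts silent for 5+ minutes go to DOWN
    } finally {
      maintenanceDao.unlockTask(task);
    }
  }
}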
*/ - - package com.imageworks.spcue.dao; import java.util.List; @@ -27,13 +23,16 @@ public interface MatcherDao { - void insertMatcher(MatcherEntity matcher); - void deleteMatcher(MatcherInterface matcher); - void updateMatcher(MatcherEntity matcher); + void insertMatcher(MatcherEntity matcher); - MatcherEntity getMatcher(String id); - MatcherEntity getMatcher(MatcherInterface matcher); - List getMatchers(FilterInterface filter); + void deleteMatcher(MatcherInterface matcher); -} + void updateMatcher(MatcherEntity matcher); + + MatcherEntity getMatcher(String id); + MatcherEntity getMatcher(MatcherInterface matcher); + + List getMatchers(FilterInterface filter); + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java index 0b48b8006..7f137d670 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java @@ -2,29 +2,23 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; - import com.imageworks.spcue.ShowInterface; import com.imageworks.spcue.grpc.host.NestedHostSeq; import com.imageworks.spcue.grpc.job.NestedGroup; - /** * A DAO for nested data structures being returned to the client. * @@ -32,20 +26,19 @@ */ public interface NestedWhiteboardDao { - /** - * returns a grouped whiteboard for specified show. - * - * @param show - * @return - */ - NestedGroup getJobWhiteboard(ShowInterface show); - - /** - * get a list of hosts - * - * @return List - */ - NestedHostSeq getHostWhiteboard(); + /** + * returns a grouped whiteboard for specified show. + * + * @param show + * @return + */ + NestedGroup getJobWhiteboard(ShowInterface show); + + /** + * get a list of hosts + * + * @return List + */ + NestedHostSeq getHostWhiteboard(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java index fbbd7e7be..f12c1820a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
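A short sketch for the reformatted MatcherDao above, with assumptions called out: the element type of getMatchers is assumed to be MatcherEntity (the generic parameter is not visible in the hunk), MatcherEntity is assumed to satisfy MatcherInterface, and the class itself is hypothetical.

import java.util.List;
import com.imageworks.spcue.FilterInterface;
import com.imageworks.spcue.MatcherEntity;
import com.imageworks.spcue.dao.MatcherDao;

public class MatcherCleanupSketch {

  private final MatcherDao matcherDao; // assumed to be provided by the Spring context

  public MatcherCleanupSketch(MatcherDao matcherDao) {
    this.matcherDao = matcherDao;
  }

  /** Removes every matcher attached to the given filter and reports how many were dropped. */
  public int clearMatchers(FilterInterface filter) {
    List<MatcherEntity> matchers = matcherDao.getMatchers(filter); // element type assumed
    for (MatcherEntity matcher : matchers) {
      matcherDao.deleteMatcher(matcher); // MatcherEntity assumed to implement MatcherInterface
    }
    return matchers.size();
  }
}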
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.Entity; @@ -26,57 +22,55 @@ public interface OwnerDao { - /** - * Return true if the given owner owns the particualar host. - * - * @param owner - * @param host - * @return - */ - boolean isOwner(OwnerEntity owner, HostInterface host); + /** + * Return true if the given owner owns the particualar host. + * + * @param owner + * @param host + * @return + */ + boolean isOwner(OwnerEntity owner, HostInterface host); - /** - * Get an owner record by ID. - * - * @param id - */ - OwnerEntity getOwner(String id); + /** + * Get an owner record by ID. + * + * @param id + */ + OwnerEntity getOwner(String id); - /** - * Return the owner of the given host. - * - * @param host - * @return - */ - OwnerEntity getOwner(HostInterface host); + /** + * Return the owner of the given host. + * + * @param host + * @return + */ + OwnerEntity getOwner(HostInterface host); - /** - * Return an owner record by name. - * - * @param name - */ - OwnerEntity findOwner(String name); + /** + * Return an owner record by name. + * + * @param name + */ + OwnerEntity findOwner(String name); - /** - * Delete the specified owner and all his/her deeds. - * Return true if the owner was actually deleted. - * False if not. - */ - boolean deleteOwner(Entity owner); + /** + * Delete the specified owner and all his/her deeds. Return true if the owner was actually + * deleted. False if not. + */ + boolean deleteOwner(Entity owner); - /** - * Insert a new owner record. - * - * @param owner - */ - void insertOwner(OwnerEntity owner, ShowInterface show); + /** + * Insert a new owner record. + * + * @param owner + */ + void insertOwner(OwnerEntity owner, ShowInterface show); - /** - * Set the owner's show. This can be null. - * - * @param owner - * @param show - */ - void updateShow(Entity owner, ShowInterface show); + /** + * Set the owner's show. This can be null. + * + * @param owner + * @param show + */ + void updateShow(Entity owner, ShowInterface show); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java index af0deac92..a1afdcf34 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
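For the reformatted OwnerDao above, the following hedged sketch shows the ownership check guarding a show reassignment. OwnerEntity is assumed to extend Entity, which updateShow expects, and the OwnershipSketch class is hypothetical; the DAO calls are those declared in the interface.

import com.imageworks.spcue.HostInterface;
import com.imageworks.spcue.OwnerEntity;
import com.imageworks.spcue.ShowInterface;
import com.imageworks.spcue.dao.OwnerDao;

public class OwnershipSketch {

  private final OwnerDao ownerDao; // assumed to be provided by the Spring context

  public OwnershipSketch(OwnerDao ownerDao) {
    this.ownerDao = ownerDao;
  }

  /** Moves the named owner to a new show, but only if they actually own the given host. */
  public boolean moveOwnerShow(String ownerName, HostInterface host, ShowInterface newShow) {
    OwnerEntity owner = ownerDao.findOwner(ownerName);
    if (!ownerDao.isOwner(owner, host)) {
      return false;
    }
    ownerDao.updateShow(owner, newShow); // OwnerEntity assumed to extend Entity
    return true;
  }
}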
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -29,104 +25,102 @@ public interface PointDao { - /** - * Inserts a render into the point table - * - * @param t - * @return - */ - void insertPointConf(PointDetail t); - - /** - * Inserts and returns an empty render point detail - * - * @param show - * @param dept - * @return - */ - PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept); - - /** - * Returns true if the department is being managed by track-it. - * - * @param show - * @param dept - * @return - */ - boolean isManaged(ShowInterface show, DepartmentInterface dept); - - /** - * Returns true if a render point config already exists - * for the specified show and department - * - * @param show - * @param dept - * @return - */ - boolean pointConfExists(ShowInterface show, DepartmentInterface dept); - - /** - * Updates the number of cores managed by this department - * - * @param cdept - * @param cores - */ - void updateManagedCores(PointInterface cdept, int cores); - - /** - * Enables TI managed. - * - * @param p - * @param task - * @param cores - */ - void updateEnableManaged(PointInterface cdept, String task, int cores); - - /** - * Disables TI mananaged. - * - * @param p - */ - void updateDisableManaged(PointInterface cdept); - - /** - * Returns a list of all managed point configs. - * - * @return - */ - List getManagedPointConfs(); - - /** - * Returns a DepartmentConfigDetail by unique ID - * - * @param id - * @return - */ - PointDetail getPointConfDetail(String id); - - /** - * Returns a DepartmentConfigDetail using the specified show and department - * - * - * @param show - * @param dept - * @return - */ - PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept); - - /** - * Updates the time at which the point config was last updated. - * - * @param t - */ - void updatePointConfUpdateTime(PointInterface t); - - /** - * - * @param job - * @return - */ - boolean isOverMinCores(JobInterface job); + /** + * Inserts a render into the point table + * + * @param t + * @return + */ + void insertPointConf(PointDetail t); + + /** + * Inserts and returns an empty render point detail + * + * @param show + * @param dept + * @return + */ + PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept); + + /** + * Returns true if the department is being managed by track-it. 
+ * + * @param show + * @param dept + * @return + */ + boolean isManaged(ShowInterface show, DepartmentInterface dept); + + /** + * Returns true if a render point config already exists for the specified show and department + * + * @param show + * @param dept + * @return + */ + boolean pointConfExists(ShowInterface show, DepartmentInterface dept); + + /** + * Updates the number of cores managed by this department + * + * @param cdept + * @param cores + */ + void updateManagedCores(PointInterface cdept, int cores); + + /** + * Enables TI managed. + * + * @param p + * @param task + * @param cores + */ + void updateEnableManaged(PointInterface cdept, String task, int cores); + + /** + * Disables TI mananaged. + * + * @param p + */ + void updateDisableManaged(PointInterface cdept); + + /** + * Returns a list of all managed point configs. + * + * @return + */ + List getManagedPointConfs(); + + /** + * Returns a DepartmentConfigDetail by unique ID + * + * @param id + * @return + */ + PointDetail getPointConfDetail(String id); + + /** + * Returns a DepartmentConfigDetail using the specified show and department + * + * + * @param show + * @param dept + * @return + */ + PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept); + + /** + * Updates the time at which the point config was last updated. + * + * @param t + */ + void updatePointConfUpdateTime(PointInterface t); + + /** + * + * @param job + * @return + */ + boolean isOverMinCores(JobInterface job); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java index dcdf8d097..221b4f99b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -38,278 +34,270 @@ */ public interface ProcDao { - /** - * Returns the amount of reserved memory a proc has - * - * @param proc - * @return - */ - - long getReservedMemory(ProcInterface proc); - - /** - * Returns the amount of reserved gpu memory a proc has - * - * @param proc - * @return - */ - - long getReservedGpuMemory(ProcInterface proc); - - /** - * Removes a little bit of reserved memory from every other running frame - * in order to give some to the target proc. 
- * - * @param targetProc - * @param targetMem - * @return - */ - boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem); - - /** - * Increase a proc's reserved memory. - * - * @param id - * @param value - * @return - */ - boolean increaseReservedMemory(ProcInterface p, long value); - - /** - * Set a proc's reserved memory. - * - * @param id - * @param value - * @return - */ - void updateReservedMemory(ProcInterface p, long value); - - /** - * verifies the mapping bewtween a proc id and a frame id - * - * @param procid - * @param frameid - * @return - */ - boolean verifyRunningProc(String procid, String frameid); - - /** - * Creates a new virtual proc - * - * @param proc - */ - void insertVirtualProc(VirtualProc proc); - - /** - * Deletes an existing virtual proc - * - * @param proc - */ - boolean deleteVirtualProc(VirtualProc proc); - - /** - * Clears a virtual proc assignement. This keeps the proc - * around but sets pk_frame to null. This would normally - * happen after a frame completes and before the proc is - * dispatched again. - * - * @param proc - */ - boolean clearVirtualProcAssignment(ProcInterface proc); - - /** - * Clear a proc assignment by frame id. Return true - * if an assignment was cleared. - * - * @param frame - * @return - */ - boolean clearVirtualProcAssignment(FrameInterface frame); - - /** - * Updates an existing proc's assignment - * - * @param proc - */ - void updateVirtualProcAssignment(VirtualProc proc); - - /** - * Update a procs memory usage based on the given - * frame it should be running. - * - * @param proc - * @param usedKb - * @param maxKb - */ - void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, - long vsize, long maxVsize, long usedGpuMemory, - long maxUsedGpuMemory, long usedSwapMemory, - byte[] children); - - /** - * get aq virual proc from its unique id - * - * @param id - * @return - */ - VirtualProc getVirtualProc(String id); - - /** - * get a virtual proc from the frame its assigned to - * - * @param frame - * @return - */ - VirtualProc findVirtualProc(FrameInterface frame); - - /** - * gets a list of virtual procs from a FrameLookupRequest - * - * @param job - * @param req - * @return - */ - List findVirtualProcs(FrameSearchInterface s); - - /** - * get the list of procs from the host. - * - * @param host - * @return - */ - List findVirtualProcs(HostInterface host); - - /** - * find all procs booked on a specified layer - * - * @param layer - * @return - */ - List findVirtualProcs(LayerInterface layer); - - /** - * find all procs booked on specified job - * - * @param job - * @return - */ - List findVirtualProcs(JobInterface job); - - /** - * - * @return - */ - List findOrphanedVirtualProcs(); - - /** - * - * @return - */ - List findOrphanedVirtualProcs(int limit); - - /** - * Returns procs with a host in a particular hardware state. - * - * @param state - * @return - */ - public List findVirtualProcs(HardwareState state); - - /** - * Returns a list if procs using a ProcSearchInterface object. - * - * @param r - A ProcSearchInterface object - * @return a list of virtual procs - */ - List findVirtualProcs(ProcSearchInterface r); - - /** - * Unbooks a list of virtual procs using a batch query - * - * @param procs - * @return - */ - void unbookVirtualProcs(List procs); - - /** - * Unbooks a single virtual proc - * - * @param procs - * @return - */ - void unbookProc(ProcInterface proc); - - /** - * Used to set the unbook flag on a proc to true or false. 
- * - * @param proc - * @param unbooked - */ - public boolean setUnbookState(ProcInterface proc, boolean unbooked); - - /** - * Updates the proc record with the name of its redirect target. - * - * @param p - * @param r - */ - public boolean setRedirectTarget(ProcInterface p, Redirect r); - - /** - * Returns the unique id of the proc's current show - * - * @param p - * @return - */ - public String getCurrentShowId(ProcInterface p); - - /** - * Returns the unique id of the procs current job - * - * @param p - * @return - */ - public String getCurrentJobId(ProcInterface p); - - /** - * Returns the unique id of the procs current layer - * - * @param p - * @return - */ - public String getCurrentLayerId(ProcInterface p); - - /** - * Returns the unique id of the procs current frame - * - * @param p - * @return - */ - public String getCurrentFrameId(ProcInterface p); - - /** - * Returns an array of booked virutal procs. - * - * @param r - * @return - */ - List findBookedVirtualProcs(ProcSearchInterface r); - - /** - * Return true if the proc is an orphan. - * - * @param proc - * @return - */ - boolean isOrphan(ProcInterface proc); - - /** - * Return a list of all procs that are booked as part - * of the given local job assignment. - * - * @param l - * @return - */ - List findVirtualProcs(LocalHostAssignment l); + /** + * Returns the amount of reserved memory a proc has + * + * @param proc + * @return + */ + + long getReservedMemory(ProcInterface proc); + + /** + * Returns the amount of reserved gpu memory a proc has + * + * @param proc + * @return + */ + + long getReservedGpuMemory(ProcInterface proc); + + /** + * Removes a little bit of reserved memory from every other running frame in order to give some to + * the target proc. + * + * @param targetProc + * @param targetMem + * @return + */ + boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem); + + /** + * Increase a proc's reserved memory. + * + * @param id + * @param value + * @return + */ + boolean increaseReservedMemory(ProcInterface p, long value); + + /** + * Set a proc's reserved memory. + * + * @param id + * @param value + * @return + */ + void updateReservedMemory(ProcInterface p, long value); + + /** + * verifies the mapping between a proc id and a frame id + * + * @param procid + * @param frameid + * @return + */ + boolean verifyRunningProc(String procid, String frameid); + + /** + * Creates a new virtual proc + * + * @param proc + */ + void insertVirtualProc(VirtualProc proc); + + /** + * Deletes an existing virtual proc + * + * @param proc + */ + boolean deleteVirtualProc(VirtualProc proc); + + /** + * Clears a virtual proc assignment. This keeps the proc around but sets pk_frame to null. This + * would normally happen after a frame completes and before the proc is dispatched again. + * + * @param proc + */ + boolean clearVirtualProcAssignment(ProcInterface proc); + + /** + * Clear a proc assignment by frame id. Return true if an assignment was cleared. + * + * @param frame + * @return + */ + boolean clearVirtualProcAssignment(FrameInterface frame); + + /** + * Updates an existing proc's assignment + * + * @param proc + */ + void updateVirtualProcAssignment(VirtualProc proc); + + /** + * Update a proc's memory usage based on the given frame it should be running.
+ * + * @param proc + * @param usedKb + * @param maxKb + */ + void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, long vsize, long maxVsize, + long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children); + + /** + * get a virtual proc from its unique id + * + * @param id + * @return + */ + VirtualProc getVirtualProc(String id); + + /** + * get a virtual proc from the frame it's assigned to + * + * @param frame + * @return + */ + VirtualProc findVirtualProc(FrameInterface frame); + + /** + * gets a list of virtual procs from a FrameLookupRequest + * + * @param job + * @param req + * @return + */ + List findVirtualProcs(FrameSearchInterface s); + + /** + * get the list of procs from the host. + * + * @param host + * @return + */ + List findVirtualProcs(HostInterface host); + + /** + * find all procs booked on a specified layer + * + * @param layer + * @return + */ + List findVirtualProcs(LayerInterface layer); + + /** + * find all procs booked on specified job + * + * @param job + * @return + */ + List findVirtualProcs(JobInterface job); + + /** + * + * @return + */ + List findOrphanedVirtualProcs(); + + /** + * + * @return + */ + List findOrphanedVirtualProcs(int limit); + + /** + * Returns procs with a host in a particular hardware state. + * + * @param state + * @return + */ + public List findVirtualProcs(HardwareState state); + + /** + * Returns a list of procs using a ProcSearchInterface object. + * + * @param r - A ProcSearchInterface object + * @return a list of virtual procs + */ + List findVirtualProcs(ProcSearchInterface r); + + /** + * Unbooks a list of virtual procs using a batch query + * + * @param procs + * @return + */ + void unbookVirtualProcs(List procs); + + /** + * Unbooks a single virtual proc + * + * @param procs + * @return + */ + void unbookProc(ProcInterface proc); + + /** + * Used to set the unbook flag on a proc to true or false. + * + * @param proc + * @param unbooked + */ + public boolean setUnbookState(ProcInterface proc, boolean unbooked); + + /** + * Updates the proc record with the name of its redirect target. + * + * @param p + * @param r + */ + public boolean setRedirectTarget(ProcInterface p, Redirect r); + + /** + * Returns the unique id of the proc's current show + * + * @param p + * @return + */ + public String getCurrentShowId(ProcInterface p); + + /** + * Returns the unique id of the proc's current job + * + * @param p + * @return + */ + public String getCurrentJobId(ProcInterface p); + + /** + * Returns the unique id of the proc's current layer + * + * @param p + * @return + */ + public String getCurrentLayerId(ProcInterface p); + + /** + * Returns the unique id of the proc's current frame + * + * @param p + * @return + */ + public String getCurrentFrameId(ProcInterface p); + + /** + * Returns an array of booked virtual procs. + * + * @param r + * @return + */ + List findBookedVirtualProcs(ProcSearchInterface r); + + /** + * Return true if the proc is an orphan. + * + * @param proc + * @return + */ + boolean isOrphan(ProcInterface proc); + + /** + * Return a list of all procs that are booked as part of the given local job assignment.
+ * + * @param l + * @return + */ + List findVirtualProcs(LocalHostAssignment l); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java index 0459f478c..9ab9dee31 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.dao; import org.springframework.transaction.annotation.Propagation; @@ -23,48 +20,48 @@ import com.imageworks.spcue.Redirect; -@Transactional(propagation=Propagation.MANDATORY) +@Transactional(propagation = Propagation.MANDATORY) public interface RedirectDao { - /** - * Check for redirect existence. - * - * @param key Redirect key - * - * @return True if redirect exists - */ - boolean containsKey(String key); + /** + * Check for redirect existence. + * + * @param key Redirect key + * + * @return True if redirect exists + */ + boolean containsKey(String key); - /** - * Count redirects in a group. - * - * @param groupId the group to query - * - * @return count of redirects in group - */ - int countRedirectsWithGroup(String groupId); + /** + * Count redirects in a group. + * + * @param groupId the group to query + * + * @return count of redirects in group + */ + int countRedirectsWithGroup(String groupId); - /** - * Delete all expired redirects. - * - * @return number of redirects deleted - */ - int deleteExpired(); + /** + * Delete all expired redirects. + * + * @return number of redirects deleted + */ + int deleteExpired(); - /** - * Add redirect. - * - * @param key Redirect key - * - * @param r Redirect to add - */ - void put(String key, Redirect r); + /** + * Add redirect. + * + * @param key Redirect key + * + * @param r Redirect to add + */ + void put(String key, Redirect r); - /** - * Delete and return specified redirect. - * - * @param key Redirect key - * - * @return the redirect that was deleted or null - */ - Redirect remove(String key); + /** + * Delete and return specified redirect. 
+ * + * @param key Redirect key + * + * @return the redirect that was deleted or null + */ + Redirect remove(String key); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java index 0933ae8d3..77ba25f21 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.ServiceEntity; @@ -24,24 +20,23 @@ public interface ServiceDao { - void insert(ServiceEntity service); + void insert(ServiceEntity service); - void insert(ServiceOverrideEntity service); + void insert(ServiceOverrideEntity service); - ServiceEntity get(String identifier); + ServiceEntity get(String identifier); - void update(ServiceEntity service); + void update(ServiceEntity service); - void update(ServiceOverrideEntity service); + void update(ServiceOverrideEntity service); - void delete(ServiceOverrideEntity service); + void delete(ServiceOverrideEntity service); - void delete(ServiceEntity service); + void delete(ServiceEntity service); - ServiceOverrideEntity getOverride(String id); + ServiceOverrideEntity getOverride(String id); - ServiceOverrideEntity getOverride(String id, String show); + ServiceOverrideEntity getOverride(String id, String show); - boolean isOverridden(String service, String show); + boolean isOverridden(String service, String show); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java index b12c0b097..709bb5582 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.HostInterface; @@ -28,129 +24,124 @@ */ public interface ShowDao { - /** - * find show detail by name - * - * @param name - * @return ShowDetail - */ - ShowEntity findShowDetail(String name); - - /** - * get show detail from its unique id - * - * @param id - * @return ShowDetail - */ - ShowEntity getShowDetail(String id); - - /** - * Get show detail from its preferred show. - * - * @param id - * @return ShowDetail - */ - ShowEntity getShowDetail(HostInterface host); - - /** - * create a show from ShowDetail - * - * @param show - */ - void insertShow(ShowEntity show); - - /** - * return true if show exists, false if not - * - * @param name - * @return boolean - */ - boolean showExists(String name); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMinCores(ShowInterface s, int val); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMaxCores(ShowInterface s, int val); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMinGpus(ShowInterface s, int val); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMaxGpus(ShowInterface s, int val); - - - /** - * Disabling this would stop new proc assignement. The show would get no new - * procs, but any procs already assigned to a job would continue to - * dispatch. - * - * @param s - * @param enabled - */ - void updateBookingEnabled(ShowInterface s, boolean enabled); - - /** - * Disabling dispatching would unbook each proc after it had completed a - * frame. - * - * @param s - * @param enabled - */ - void updateDispatchingEnabled(ShowInterface s, boolean enabled); - - /** - * Deletes a show if no data has been added to it. - * - * @param s - */ - void delete(ShowInterface s); - - /** - * Updates the show frame counter. This counts all failed succceeded frames, - * forver. - * - * @param s - * @param exitStatus - */ - void updateFrameCounters(ShowInterface s, int exitStatus); - - /** - * Set the enabled status of a show to true/false. - * - * @param s - * @param enabled - */ - void updateActive(ShowInterface s, boolean enabled); - - /** - * An array of email addresses for which all job comments are echoed to. - * - * @param s - * @param emails - */ - void updateShowCommentEmail(ShowInterface s, String[] emails); - - /** - * Scheduled task to update shows. 
Set show as inactive if it has at - * least 1 job in job_history service th - */ - void updateShowsStatus(); + /** + * find show detail by name + * + * @param name + * @return ShowDetail + */ + ShowEntity findShowDetail(String name); + + /** + * get show detail from its unique id + * + * @param id + * @return ShowDetail + */ + ShowEntity getShowDetail(String id); + + /** + * Get show detail from its preferred show. + * + * @param id + * @return ShowDetail + */ + ShowEntity getShowDetail(HostInterface host); + + /** + * create a show from ShowDetail + * + * @param show + */ + void insertShow(ShowEntity show); + + /** + * return true if show exists, false if not + * + * @param name + * @return boolean + */ + boolean showExists(String name); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMinCores(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMaxCores(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMinGpus(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMaxGpus(ShowInterface s, int val); + + /** + * Disabling this would stop new proc assignment. The show would get no new procs, but any procs + * already assigned to a job would continue to dispatch. + * + * @param s + * @param enabled + */ + void updateBookingEnabled(ShowInterface s, boolean enabled); + + /** + * Disabling dispatching would unbook each proc after it had completed a frame. + * + * @param s + * @param enabled + */ + void updateDispatchingEnabled(ShowInterface s, boolean enabled); + + /** + * Deletes a show if no data has been added to it. + * + * @param s + */ + void delete(ShowInterface s); + + /** + * Updates the show frame counter. This counts all failed and succeeded frames, forever. + * + * @param s + * @param exitStatus + */ + void updateFrameCounters(ShowInterface s, int exitStatus); + + /** + * Set the enabled status of a show to true/false. + * + * @param s + * @param enabled + */ + void updateActive(ShowInterface s, boolean enabled); + + /** + * An array of email addresses to which all job comments are echoed. + * + * @param s + * @param emails + */ + void updateShowCommentEmail(ShowInterface s, String[] emails); + + /** + * Scheduled task to update shows. Set show as inactive if it has at least 1 job in job_history + * service th + */ + void updateShowsStatus(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java index d0fe43eb7..ffc083d7d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.AllocationInterface; @@ -27,100 +23,95 @@ public interface SubscriptionDao { - /** - * returns true if the subscription has running procs - * - * @param sub SubscriptionInterface - * @return boolean - */ - boolean hasRunningProcs(SubscriptionInterface sub); - - /** - * Return true if the given show is at or over its size value for the given - * allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @return boolean - */ - boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc); - - /** - * Return true if the given show is over its size value for the given - * allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @return boolean - */ - boolean isShowOverSize(ShowInterface show, AllocationInterface alloc); - - /** - * Return true if adding the given coreUnits would put the show over its - * burst value for the given allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @param coreUnits int - * @return boolean - */ - boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); - - /** - * Return true if the given show is at or over its burst value for the given - * allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @return boolean - */ - boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); - - /** - * Return true if the show that is utilizing the given proc has exceeded its - * burst. - * - * @param proc VirtualProc - * @return boolean - */ - boolean isShowOverSize(VirtualProc proc); - - /** - * Return a SubscriptionDetail from its unique id - * - * @param id String - * @return SubscriptionEntity - */ - SubscriptionEntity getSubscriptionDetail(String id); - - /** - * Insert a new subscription - * - * @param detail SubscriptionEntity - */ - void insertSubscription(SubscriptionEntity detail); - - /** - * Delete specified subscription - * - * @param sub SubscriptionInterface - */ - void deleteSubscription(SubscriptionInterface sub); - - /** - * update the size of a subscription - * - * @param sub SubscriptionInterface - * @param size int - */ - void updateSubscriptionSize(SubscriptionInterface sub, int size); - - /** - * update the subscription burst - * - * @param sub SubscriptionInterface - * @param size int - */ - void updateSubscriptionBurst(SubscriptionInterface sub, int size); + /** + * returns true if the subscription has running procs + * + * @param sub SubscriptionInterface + * @return boolean + */ + boolean hasRunningProcs(SubscriptionInterface sub); + + /** + * Return true if the given show is at or over its size value for the given allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @return boolean + */ + boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc); + + /** + * Return true if the given show is over its size value for the given allocation. 
+ * + * @param show ShowInterface + * @param alloc AllocationInterface + * @return boolean + */ + boolean isShowOverSize(ShowInterface show, AllocationInterface alloc); + + /** + * Return true if adding the given coreUnits would put the show over its burst value for the given + * allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @param coreUnits int + * @return boolean + */ + boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); + + /** + * Return true if the given show is at or over its burst value for the given allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @return boolean + */ + boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); + + /** + * Return true if the show that is utilizing the given proc has exceeded its burst. + * + * @param proc VirtualProc + * @return boolean + */ + boolean isShowOverSize(VirtualProc proc); + + /** + * Return a SubscriptionDetail from its unique id + * + * @param id String + * @return SubscriptionEntity + */ + SubscriptionEntity getSubscriptionDetail(String id); + + /** + * Insert a new subscription + * + * @param detail SubscriptionEntity + */ + void insertSubscription(SubscriptionEntity detail); + + /** + * Delete specified subscription + * + * @param sub SubscriptionInterface + */ + void deleteSubscription(SubscriptionInterface sub); + + /** + * update the size of a subscription + * + * @param sub SubscriptionInterface + * @param size int + */ + void updateSubscriptionSize(SubscriptionInterface sub, int size); + + /** + * update the subscription burst + * + * @param sub SubscriptionInterface + * @param size int + */ + void updateSubscriptionBurst(SubscriptionInterface sub, int size); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java index 20a4f72cd..d7a3d5e00 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao; import com.imageworks.spcue.DepartmentInterface; @@ -28,104 +24,102 @@ public interface TaskDao { - /** - * Delete all tasks for the specified dept config - * - * @param d - */ - void deleteTasks(PointInterface cdept); - - /** - * Delete all tasks for the specified show and dept - * - * @param d - */ - void deleteTasks(ShowInterface show, DepartmentInterface dept); - - /** - * Inserts a new task. A task is a shot based department priority. - * - * @param task - */ - void insertTask(TaskEntity task); - - /** - * Remove specified task. - * - * @param task - */ - void deleteTask(TaskInterface task); - - /** - * Returns a task from its unique id - * - * @param id - */ - TaskEntity getTaskDetail(String id); - - /** - * Returns a job's task representation - * - * @param j - * @return - */ - TaskEntity getTaskDetail(JobInterface j); - - /** - * Updates the specified tasks min procs - * - * @param t - * @param value - */ - void updateTaskMinCores(TaskInterface t, int value); - - /** - * Inserts a task if if does not exist, otherwise its updated. - * - * @param t - */ - void mergeTask(TaskEntity t); - - /** - * Returns true if the task is ti-managed. - */ - boolean isManaged(TaskInterface t); - - /** - * Adjusts the specified task's min cores to value. Only use adjust when the - * task is managed. - * - * @param t - * @param value - */ - void adjustTaskMinCores(TaskInterface t, int value); - - /** - * - * @param cdept - */ - void clearTaskAdjustments(PointInterface cdept); - - /** - * - * @param t - */ - void clearTaskAdjustment(TaskInterface t); - - /** - * Returns a TaskDetail from a department id and shot name. - * - * @param d - * @param shot - * @return - */ - TaskEntity getTaskDetail(DepartmentInterface d, String shot); - - /** - * Returns true if the specified job is being managed by a task. - * - * @param Job - */ - boolean isManaged(JobInterface j); + /** + * Delete all tasks for the specified dept config + * + * @param d + */ + void deleteTasks(PointInterface cdept); + + /** + * Delete all tasks for the specified show and dept + * + * @param d + */ + void deleteTasks(ShowInterface show, DepartmentInterface dept); + + /** + * Inserts a new task. A task is a shot based department priority. + * + * @param task + */ + void insertTask(TaskEntity task); + + /** + * Remove specified task. + * + * @param task + */ + void deleteTask(TaskInterface task); + + /** + * Returns a task from its unique id + * + * @param id + */ + TaskEntity getTaskDetail(String id); + + /** + * Returns a job's task representation + * + * @param j + * @return + */ + TaskEntity getTaskDetail(JobInterface j); + + /** + * Updates the specified task's min cores + * + * @param t + * @param value + */ + void updateTaskMinCores(TaskInterface t, int value); + + /** + * Inserts a task if it does not exist, otherwise it is updated. + * + * @param t + */ + void mergeTask(TaskEntity t); + + /** + * Returns true if the task is ti-managed. + */ + boolean isManaged(TaskInterface t); + + /** + * Adjusts the specified task's min cores to value. Only use adjust when the task is managed. + * + * @param t + * @param value + */ + void adjustTaskMinCores(TaskInterface t, int value); + + /** + * + * @param cdept + */ + void clearTaskAdjustments(PointInterface cdept); + + /** + * + * @param t + */ + void clearTaskAdjustment(TaskInterface t); + + /** + * Returns a TaskDetail from a department id and shot name.
+ * + * @param d + * @param shot + * @return + */ + TaskEntity getTaskDetail(DepartmentInterface d, String shot); + + /** + * Returns true if the specified job is being managed by a task. + * + * @param Job + */ + boolean isManaged(JobInterface j); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java index 19c0e2784..545b87be5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -25,15 +21,14 @@ public interface TrackitDao { - /** - * Downloads a list of all tasks for the specified department - * and inserts them into the Task table. - * - * @param show - * @param department - * @return - */ - List getTasks(String show, String dept); + /** + * Downloads a list of all tasks for the specified department and inserts them into the Task + * table. + * + * @param show + * @param department + * @return + */ + List getTasks(String show, String dept); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java index f01709f0c..8b1f83ec2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao; import java.util.List; @@ -91,614 +87,610 @@ */ public interface WhiteboardDao { - /** - * Returns a list of hosts - * - * @param h HostInterface - * @return ProcSeq - */ - ProcSeq getProcs(HostInterface h); - - /** - * Returns a list of hosts - * - * @param r HostSearchInterface - * @return HostSeq - */ - HostSeq getHosts(HostSearchInterface r); - - /** - * Returns a list of jobs - * - * @param r JobSearchInterface - * @return JobSeq - */ - JobSeq getJobs(JobSearchInterface r); - - /** - * Returns a list of job names - * - * @param r JobSearchInterface - * @return List of Strings - */ - - List getJobNames(JobSearchInterface r); - - /** - * Returns the comments for the specified job - * - * @param j JobInterface - * @return CommentSeq - */ - CommentSeq getComments(JobInterface j); - - /** - * Returns the comments for the specified host - * - * @param h HostInterface - * @return CommentSeq - */ - CommentSeq getComments(HostInterface h); - - /** - * returns the host a proc is part of - * - * @param id String - * @return Host - */ - Host getHost(String id); - - /** - * returns the host by name - * - * @param name String - * @return Host - */ - Host findHost(String name); - - /** - * Return a dependency by its unique id - * - * @param id String - * @return Depend - */ - Depend getDepend(String id); - - /** - * Returns a list of all dependencies this job is involved with. - * - * @param job JobInterface - * @return DependSeq - */ - DependSeq getDepends(JobInterface job); - - /** - * Returns an array of depends that depend on the specified job. - * - * @param job JobInterface - * @return DependSeq - */ - DependSeq getWhatDependsOnThis(JobInterface job); - - /** - * Returns an array of depends that depend on the specified layer. - * - * @param layer LayerInterface - * @return DependSeq - */ - DependSeq getWhatDependsOnThis(LayerInterface layer); - - /** - * Returns an array of depends that depend on the specified job. - * - * @param frame FrameInterface - * @return DependSeq - */ - DependSeq getWhatDependsOnThis(FrameInterface frame); - - /** - * Returns an array of depends that the specified job is waiting on. - * - * @param job JobInterface - * @return DependSeq - */ - DependSeq getWhatThisDependsOn(JobInterface job); - - /** - * Returns an array of depends that the specified layer is waiting on. - * - * @param layer LayerInterface - * @return DependSeq - */ - DependSeq getWhatThisDependsOn(LayerInterface layer); - - /** - * Returns an array of depends that the specified frame is waiting on. 
- * - * @param frame FrameInterface - * @return DependSeq - */ - DependSeq getWhatThisDependsOn(FrameInterface frame); - - /** - * Returns the specified dependency - * - * @param depend DependInterface - * @return Depend - */ - Depend getDepend(DependInterface depend); - - Filter findFilter(String show, String name); - - Filter findFilter(ShowInterface show, String name); - - Filter getFilter(FilterInterface filter); - - MatcherSeq getMatchers(FilterInterface filter); - - Matcher getMatcher(MatcherInterface matcher); - - ActionSeq getActions(FilterInterface filter); - - Action getAction(ActionInterface action); - - /** - * Returns the frame by unique ID - * - * @param id String - * @return Frame - */ - Frame getFrame(String id); - - /** - * Returns a list of filters by show - * - * @param show ShowInterface - * @return FilterSeq - */ - - FilterSeq getFilters(ShowInterface show); - - /** - * Frame search - * - * @param r FrameSearchInterface - * @return FrameSeq - */ - FrameSeq getFrames(FrameSearchInterface r); - - /** - * Returns a list of layers for the specified job. - * - * @param job JobInterface - * @return LayerSeq - */ - LayerSeq getLayers(JobInterface job); - - /** - * Returns a layer from its unique ID - * - * @param id String - * @return Layer - */ - Layer getLayer(String id); - - /** - * Returns a list of limits for the specified layer. - * - * @param id String - * @return Layer - */ - List getLimits(LayerInterface layer); - - /** - * - * @param group GroupInterface - * @return JobSeq - */ - JobSeq getJobs(GroupInterface group); - - /** - * Finds an active job record based on the name - * - * @param name String - * @return Job - */ - Job findJob(String name); - - /** - * Gets an active job based on the Id - * - * @param id String - * @return Job - */ - Job getJob(String id); - - /** - * returns a subscription by its id - * - * @return Subscription - */ - Subscription getSubscription(String id); - - /** - * Find subscription using the show, facility, and alloc name. - * - * @param show String - * @param alloc String - * @return Subscription - */ - Subscription findSubscription(String show, String alloc); - - /** - * returns a list of subscriptions - * - * @param show ShowInterface - * @return SubscriptionSeq - */ - SubscriptionSeq getSubscriptions(ShowInterface show); - - /** - * returns all subscriptions on the specified allocation - * - * @param alloc AllocationInterface - * @return SubscriptionSeq - */ - SubscriptionSeq getSubscriptions(AllocationInterface alloc); - - /** - * returns a show by Id. - * - * @param id String - * @return Show - */ - Show getShow(String id); - - /** - * returns a show by its name. - * - * @param name String - * @return Show - */ - Show findShow(String name); - - /** - * - * return a list of shows from a whiteboard request - * - * @return ShowSeq - */ - ShowSeq getShows(); - - /** - * returns an allocation by Id. - * - * @param id String - * @return Allocation - */ - Allocation getAllocation(String id); - - /** - * returns a show by its name. 
- * - * @param name String - * @return Allocation - */ - Allocation findAllocation(String name); - - /** - * - * return the current list of allocations - * - * @return List of Allocations - */ - AllocationSeq getAllocations(); - - /** - * - * return the current list of allocations - * - * @param facility FacilityInterface - * @return List of Allocations - */ - AllocationSeq getAllocations(FacilityInterface facility); - - - /** - * - * @param show ShowInterface - * @return Group - */ - Group getRootGroup(ShowInterface show); - - /** - * - * @param id String - * @return Group - */ - Group getGroup(String id); - - /** - * Finds a group by show name and group name - * - * @param show String - * @param group String - * @return Group - */ - Group findGroup(String show, String group); - - /** - * - * - * @param show ShowInterface - * @return GroupSeq - */ - GroupSeq getGroups(ShowInterface show); - - /** - * - * @param group GroupInterface - * @return GroupSeq - */ - GroupSeq getGroups(GroupInterface group); - - - /** - * - * @param job String - * @param layer String - * @return Layer - */ - Layer findLayer(String job, String layer); - - /** - * - * @param job String - * @param layer String - * @param frame int - * @return Frame - */ - Frame findFrame(String job, String layer, int frame); - - - /** - * returns an UpdatedFrameCheckResult which contains an array of updated frames. - * - * @param job JobInterface - * @param layers List of LayerInterfaces - * @param lastUpdate int - * @return UpdatedFrameCheckResult - */ - UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, - List layers, int lastUpdate); - - /** - * - * @param show ShowInterface - * @return DepartmentSeq - */ - DepartmentSeq getDepartments (ShowInterface show); - - /** - * - * @param show ShowInterface - * @param name String - * @return Department - */ - Department getDepartment(ShowInterface show, String name); - - /** - * Returns a list of available department names - * - * @return List of Strings - */ - List getDepartmentNames(); - - /** - * - * @param show ShowInterface - * @param dept DepartmentInterface - * @param shot String - * @return Task - */ - Task getTask(ShowInterface show, DepartmentInterface dept, String shot); - - /** - * - * @param show ShowInterface - * @param dept DepartmentInterface - * @return List of Tasks - */ - TaskSeq getTasks(ShowInterface show, DepartmentInterface dept); - - /** - * Returns procs from a ProcSearchInterface criteria. - * - * @param p ProcSearchInterface - * @return ProcSeq - */ - ProcSeq getProcs(ProcSearchInterface p); - - /** - * Return the grpc representation of the given AbstractDepend. - * - * @param depend AbstractDepend - * @return Depend - */ - Depend getDepend(AbstractDepend depend); - - /** - * Return the Host record for the given Deed. - * - * @param deed DeedEntity - * @return Host - */ - Host getHost(DeedEntity deed); - - /** - * Return the Owner of the given Deed. - * - * @param deed DeedEntity - * @return Owner - */ - Owner getOwner(DeedEntity deed); - - /** - * Return a list of all Deeds controlled by the given Owner. - * - * @param owner OwnerEntity - * @return DeedSeq - */ - DeedSeq getDeeds(OwnerEntity owner); - - /** - * Return a list of all Hosts controlled by the given Owner. - * - * @param owner OwnerEntity - * @return HostSeq - */ - HostSeq getHosts(OwnerEntity owner); - - /** - * Return the Owner of the given host. - * - * @param host HostInterface - * @return Owner - */ - Owner getOwner(HostInterface host); - - /** - * Return the Deed for the given Host. 
- * - * @param host HostInterface - * @return Deed - */ - Deed getDeed(HostInterface host); - - /** - * Return the owner by name. - * - * @param name String - * @return Owner - */ - Owner getOwner(String name); - - /** - * Return a list of owners by show. - * - * @param show ShowInterface - * @return List of Owners - */ - List getOwners(ShowInterface show); - - /** - * Return a list of Deeds by show. - * - * @param show ShowInterface - * @return DeedSeq - */ - DeedSeq getDeeds(ShowInterface show); - - /** - * Return a RenderPartion from its associated LocalHostAssignment. - * - * @param l LocalHostAssignment - * @return RenderPartition - */ - RenderPartition getRenderPartition(LocalHostAssignment l); - - /** - * Return a list or RenderPartition for the given Host. - * - * @param host HostInterface - * @return RenderPartitionSeq - */ - RenderPartitionSeq getRenderPartitions(HostInterface host); - - /** - * Return a facility by name or id. - * - * @param name String - * @return Facility - */ - Facility getFacility(String name); - - /** - * Return the full list of facilities. - * - * @return List of Facilities - */ - FacilitySeq getFacilities(); - - /** - * Return a list of all active shows. - * - * @return ShowSeq - */ - ShowSeq getActiveShows(); - - /** - * Return the given service. - * - * @param id String - * @return Service - */ - Service getService(String id); - - /** - * Return the list of cluster wide service defaults. - * - * @return ServiceSeq - */ - ServiceSeq getDefaultServices(); - - /** - * Return the list of service overrides for a particular show. - * - * @param show ShowInterface - * @return List of ServiceOverrides - */ - ServiceOverrideSeq getServiceOverrides(ShowInterface show); - - /** - * Return the given show override. - * - * @param show ShowInterface - * @param name String - * @return ServiceOverride - */ - ServiceOverride getServiceOverride(ShowInterface show, String name); - - /** - * Find a service by name. - * - * @param name String - * @return Service - */ - Service findService(String name); - - /** - * Find a limit by name. - * - * @param name String - * @return Service - */ - Limit findLimit(String name); - - /** - * Return a service by ID. - * - * @param id String - * @return Limit - */ - Limit getLimit(String id); - - /** - * Returns a list of all limits. 
- * - * @param id String - * @return Layer - */ - List getLimits(); + /** + * Returns a list of hosts + * + * @param h HostInterface + * @return ProcSeq + */ + ProcSeq getProcs(HostInterface h); + + /** + * Returns a list of hosts + * + * @param r HostSearchInterface + * @return HostSeq + */ + HostSeq getHosts(HostSearchInterface r); + + /** + * Returns a list of jobs + * + * @param r JobSearchInterface + * @return JobSeq + */ + JobSeq getJobs(JobSearchInterface r); + + /** + * Returns a list of job names + * + * @param r JobSearchInterface + * @return List of Strings + */ + + List getJobNames(JobSearchInterface r); + + /** + * Returns the comments for the specified job + * + * @param j JobInterface + * @return CommentSeq + */ + CommentSeq getComments(JobInterface j); + + /** + * Returns the comments for the specified host + * + * @param h HostInterface + * @return CommentSeq + */ + CommentSeq getComments(HostInterface h); + + /** + * returns the host a proc is part of + * + * @param id String + * @return Host + */ + Host getHost(String id); + + /** + * returns the host by name + * + * @param name String + * @return Host + */ + Host findHost(String name); + + /** + * Return a dependency by its unique id + * + * @param id String + * @return Depend + */ + Depend getDepend(String id); + + /** + * Returns a list of all dependencies this job is involved with. + * + * @param job JobInterface + * @return DependSeq + */ + DependSeq getDepends(JobInterface job); + + /** + * Returns an array of depends that depend on the specified job. + * + * @param job JobInterface + * @return DependSeq + */ + DependSeq getWhatDependsOnThis(JobInterface job); + + /** + * Returns an array of depends that depend on the specified layer. + * + * @param layer LayerInterface + * @return DependSeq + */ + DependSeq getWhatDependsOnThis(LayerInterface layer); + + /** + * Returns an array of depends that depend on the specified job. + * + * @param frame FrameInterface + * @return DependSeq + */ + DependSeq getWhatDependsOnThis(FrameInterface frame); + + /** + * Returns an array of depends that the specified job is waiting on. + * + * @param job JobInterface + * @return DependSeq + */ + DependSeq getWhatThisDependsOn(JobInterface job); + + /** + * Returns an array of depends that the specified layer is waiting on. + * + * @param layer LayerInterface + * @return DependSeq + */ + DependSeq getWhatThisDependsOn(LayerInterface layer); + + /** + * Returns an array of depends that the specified frame is waiting on. 
+ * + * @param frame FrameInterface + * @return DependSeq + */ + DependSeq getWhatThisDependsOn(FrameInterface frame); + + /** + * Returns the specified dependency + * + * @param depend DependInterface + * @return Depend + */ + Depend getDepend(DependInterface depend); + + Filter findFilter(String show, String name); + + Filter findFilter(ShowInterface show, String name); + + Filter getFilter(FilterInterface filter); + + MatcherSeq getMatchers(FilterInterface filter); + + Matcher getMatcher(MatcherInterface matcher); + + ActionSeq getActions(FilterInterface filter); + + Action getAction(ActionInterface action); + + /** + * Returns the frame by unique ID + * + * @param id String + * @return Frame + */ + Frame getFrame(String id); + + /** + * Returns a list of filters by show + * + * @param show ShowInterface + * @return FilterSeq + */ + + FilterSeq getFilters(ShowInterface show); + + /** + * Frame search + * + * @param r FrameSearchInterface + * @return FrameSeq + */ + FrameSeq getFrames(FrameSearchInterface r); + + /** + * Returns a list of layers for the specified job. + * + * @param job JobInterface + * @return LayerSeq + */ + LayerSeq getLayers(JobInterface job); + + /** + * Returns a layer from its unique ID + * + * @param id String + * @return Layer + */ + Layer getLayer(String id); + + /** + * Returns a list of limits for the specified layer. + * + * @param id String + * @return Layer + */ + List getLimits(LayerInterface layer); + + /** + * + * @param group GroupInterface + * @return JobSeq + */ + JobSeq getJobs(GroupInterface group); + + /** + * Finds an active job record based on the name + * + * @param name String + * @return Job + */ + Job findJob(String name); + + /** + * Gets an active job based on the Id + * + * @param id String + * @return Job + */ + Job getJob(String id); + + /** + * returns a subscription by its id + * + * @return Subscription + */ + Subscription getSubscription(String id); + + /** + * Find subscription using the show, facility, and alloc name. + * + * @param show String + * @param alloc String + * @return Subscription + */ + Subscription findSubscription(String show, String alloc); + + /** + * returns a list of subscriptions + * + * @param show ShowInterface + * @return SubscriptionSeq + */ + SubscriptionSeq getSubscriptions(ShowInterface show); + + /** + * returns all subscriptions on the specified allocation + * + * @param alloc AllocationInterface + * @return SubscriptionSeq + */ + SubscriptionSeq getSubscriptions(AllocationInterface alloc); + + /** + * returns a show by Id. + * + * @param id String + * @return Show + */ + Show getShow(String id); + + /** + * returns a show by its name. + * + * @param name String + * @return Show + */ + Show findShow(String name); + + /** + * + * return a list of shows from a whiteboard request + * + * @return ShowSeq + */ + ShowSeq getShows(); + + /** + * returns an allocation by Id. + * + * @param id String + * @return Allocation + */ + Allocation getAllocation(String id); + + /** + * returns a show by its name. 
+ * + * @param name String + * @return Allocation + */ + Allocation findAllocation(String name); + + /** + * + * return the current list of allocations + * + * @return List of Allocations + */ + AllocationSeq getAllocations(); + + /** + * + * return the current list of allocations + * + * @param facility FacilityInterface + * @return List of Allocations + */ + AllocationSeq getAllocations(FacilityInterface facility); + + /** + * + * @param show ShowInterface + * @return Group + */ + Group getRootGroup(ShowInterface show); + + /** + * + * @param id String + * @return Group + */ + Group getGroup(String id); + + /** + * Finds a group by show name and group name + * + * @param show String + * @param group String + * @return Group + */ + Group findGroup(String show, String group); + + /** + * + * + * @param show ShowInterface + * @return GroupSeq + */ + GroupSeq getGroups(ShowInterface show); + + /** + * + * @param group GroupInterface + * @return GroupSeq + */ + GroupSeq getGroups(GroupInterface group); + + /** + * + * @param job String + * @param layer String + * @return Layer + */ + Layer findLayer(String job, String layer); + + /** + * + * @param job String + * @param layer String + * @param frame int + * @return Frame + */ + Frame findFrame(String job, String layer, int frame); + + /** + * returns an UpdatedFrameCheckResult which contains an array of updated frames. + * + * @param job JobInterface + * @param layers List of LayerInterfaces + * @param lastUpdate int + * @return UpdatedFrameCheckResult + */ + UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, + int lastUpdate); + + /** + * + * @param show ShowInterface + * @return DepartmentSeq + */ + DepartmentSeq getDepartments(ShowInterface show); + + /** + * + * @param show ShowInterface + * @param name String + * @return Department + */ + Department getDepartment(ShowInterface show, String name); + + /** + * Returns a list of available department names + * + * @return List of Strings + */ + List getDepartmentNames(); + + /** + * + * @param show ShowInterface + * @param dept DepartmentInterface + * @param shot String + * @return Task + */ + Task getTask(ShowInterface show, DepartmentInterface dept, String shot); + + /** + * + * @param show ShowInterface + * @param dept DepartmentInterface + * @return List of Tasks + */ + TaskSeq getTasks(ShowInterface show, DepartmentInterface dept); + + /** + * Returns procs from a ProcSearchInterface criteria. + * + * @param p ProcSearchInterface + * @return ProcSeq + */ + ProcSeq getProcs(ProcSearchInterface p); + + /** + * Return the grpc representation of the given AbstractDepend. + * + * @param depend AbstractDepend + * @return Depend + */ + Depend getDepend(AbstractDepend depend); + + /** + * Return the Host record for the given Deed. + * + * @param deed DeedEntity + * @return Host + */ + Host getHost(DeedEntity deed); + + /** + * Return the Owner of the given Deed. + * + * @param deed DeedEntity + * @return Owner + */ + Owner getOwner(DeedEntity deed); + + /** + * Return a list of all Deeds controlled by the given Owner. + * + * @param owner OwnerEntity + * @return DeedSeq + */ + DeedSeq getDeeds(OwnerEntity owner); + + /** + * Return a list of all Hosts controlled by the given Owner. + * + * @param owner OwnerEntity + * @return HostSeq + */ + HostSeq getHosts(OwnerEntity owner); + + /** + * Return the Owner of the given host. + * + * @param host HostInterface + * @return Owner + */ + Owner getOwner(HostInterface host); + + /** + * Return the Deed for the given Host. 
+ *
+ * @param host HostInterface
+ * @return Deed
+ */
+ Deed getDeed(HostInterface host);
+
+ /**
+ * Return the owner by name.
+ *
+ * @param name String
+ * @return Owner
+ */
+ Owner getOwner(String name);
+
+ /**
+ * Return a list of owners by show.
+ *
+ * @param show ShowInterface
+ * @return List of Owners
+ */
+ List getOwners(ShowInterface show);
+
+ /**
+ * Return a list of Deeds by show.
+ *
+ * @param show ShowInterface
+ * @return DeedSeq
+ */
+ DeedSeq getDeeds(ShowInterface show);
+
+ /**
+ * Return a RenderPartition from its associated LocalHostAssignment.
+ *
+ * @param l LocalHostAssignment
+ * @return RenderPartition
+ */
+ RenderPartition getRenderPartition(LocalHostAssignment l);
+
+ /**
+ * Return a list of RenderPartitions for the given Host.
+ *
+ * @param host HostInterface
+ * @return RenderPartitionSeq
+ */
+ RenderPartitionSeq getRenderPartitions(HostInterface host);
+
+ /**
+ * Return a facility by name or id.
+ *
+ * @param name String
+ * @return Facility
+ */
+ Facility getFacility(String name);
+
+ /**
+ * Return the full list of facilities.
+ *
+ * @return List of Facilities
+ */
+ FacilitySeq getFacilities();
+
+ /**
+ * Return a list of all active shows.
+ *
+ * @return ShowSeq
+ */
+ ShowSeq getActiveShows();
+
+ /**
+ * Return the given service.
+ *
+ * @param id String
+ * @return Service
+ */
+ Service getService(String id);
+
+ /**
+ * Return the list of cluster-wide service defaults.
+ *
+ * @return ServiceSeq
+ */
+ ServiceSeq getDefaultServices();
+
+ /**
+ * Return the list of service overrides for a particular show.
+ *
+ * @param show ShowInterface
+ * @return List of ServiceOverrides
+ */
+ ServiceOverrideSeq getServiceOverrides(ShowInterface show);
+
+ /**
+ * Return the given show's service override.
+ *
+ * @param show ShowInterface
+ * @param name String
+ * @return ServiceOverride
+ */
+ ServiceOverride getServiceOverride(ShowInterface show, String name);
+
+ /**
+ * Find a service by name.
+ *
+ * @param name String
+ * @return Service
+ */
+ Service findService(String name);
+
+ /**
+ * Find a limit by name.
+ *
+ * @param name String
+ * @return Limit
+ */
+ Limit findLimit(String name);
+
+ /**
+ * Return a limit by ID.
+ *
+ * @param id String
+ * @return Limit
+ */
+ Limit getLimit(String id);
+
+ /**
+ * Returns a list of all limits.
+ *
+ * @return List of Limits
+ */
+ List getLimits();
}
-
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java
index f6dcaa6e9..8d1e2df8b 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java
@@ -2,44 +2,39 @@
/*
* Copyright Contributors to the OpenCue Project
*
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
*
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
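The String-keyed lookups declared above (findJob, findFrame, findHost, and friends) are easiest to see from a caller's side. A minimal sketch follows; it assumes these declarations belong to the whiteboard DAO interface (com.imageworks.spcue.dao.WhiteboardDao), that the gRPC return types live under com.imageworks.spcue.grpc, and that the bean is injected by Spring. Every name in the method body is a placeholder, not taken from this patch.

    import com.imageworks.spcue.dao.WhiteboardDao;
    import com.imageworks.spcue.grpc.host.Host;
    import com.imageworks.spcue.grpc.job.Frame;
    import com.imageworks.spcue.grpc.job.Job;

    public class WhiteboardLookupSketch {
        private final WhiteboardDao whiteboard; // assumed to be wired by Spring

        public WhiteboardLookupSketch(WhiteboardDao whiteboard) {
            this.whiteboard = whiteboard;
        }

        public void printSummary() {
            // Placeholder names; the three calls are the String-based finders declared above.
            Job job = whiteboard.findJob("example_show-shot01-user_render_v01");
            Frame frame = whiteboard.findFrame("example_show-shot01-user_render_v01", "render", 1);
            Host host = whiteboard.findHost("render-node-001");
            System.out.println(job);
            System.out.println(frame);
            System.out.println(host);
        }
    }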
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.criteria; @SuppressWarnings("serial") public class CriteriaException extends RuntimeException { - public CriteriaException() { - // TODO Auto-generated constructor stub - } + public CriteriaException() { + // TODO Auto-generated constructor stub + } - public CriteriaException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public CriteriaException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public CriteriaException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public CriteriaException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } - public CriteriaException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public CriteriaException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java index eda8b0c13..567e50a32 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.dao.criteria; @@ -20,12 +18,19 @@ import java.util.List; public interface CriteriaInterface { - String toString(); - void setFirstResult(int firstResult); - void setMaxResults(int maxResults); - void addSort(Sort o); - String getWhereClause(); - String getFilteredQuery(String query); - List getValues(); - Object[] getValuesArray(); + String toString(); + + void setFirstResult(int firstResult); + + void setMaxResults(int maxResults); + + void addSort(Sort o); + + String getWhereClause(); + + String getFilteredQuery(String query); + + List getValues(); + + Object[] getValuesArray(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java index ab4a42166..7e68b87ee 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java @@ -2,25 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.criteria; public enum Direction { - ASC, - DESC + ASC, DESC } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java index 7a957bdd9..45c40bc55 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dao.criteria; @@ -27,55 +25,55 @@ import com.imageworks.spcue.grpc.job.FrameSearchCriteria; public class FrameSearchFactory { - private DatabaseEngine dbEngine; + private DatabaseEngine dbEngine; - public FrameSearchInterface create() { - return new FrameSearch(); - } + public FrameSearchInterface create() { + return new FrameSearch(); + } - public FrameSearchInterface create(List frameIds) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByFrameIds(frameIds); - return frameSearch; - } + public FrameSearchInterface create(List frameIds) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByFrameIds(frameIds); + return frameSearch; + } - public FrameSearchInterface create(JobInterface job) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByJob(job); - return frameSearch; - } + public FrameSearchInterface create(JobInterface job) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByJob(job); + return frameSearch; + } - public FrameSearchInterface create(FrameInterface frame) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByFrame(frame); - return frameSearch; - } + public FrameSearchInterface create(FrameInterface frame) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByFrame(frame); + return frameSearch; + } - public FrameSearchInterface create(JobInterface job, FrameSearchCriteria criteria) { - FrameSearchInterface frameSearch = create(); - frameSearch.setCriteria(criteria); - frameSearch.filterByJob(job); - return frameSearch; - } + public FrameSearchInterface create(JobInterface job, FrameSearchCriteria criteria) { + FrameSearchInterface frameSearch = create(); + frameSearch.setCriteria(criteria); + frameSearch.filterByJob(job); + return frameSearch; + } - public FrameSearchInterface create(LayerInterface layer) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByLayer(layer); - return frameSearch; - } + public FrameSearchInterface create(LayerInterface layer) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByLayer(layer); + return frameSearch; + } - public FrameSearchInterface create(LayerInterface layer, FrameSearchCriteria criteria) { - FrameSearchInterface frameSearch = create(); - frameSearch.setCriteria(criteria); - frameSearch.filterByLayer(layer); - return frameSearch; - } + public FrameSearchInterface create(LayerInterface layer, FrameSearchCriteria criteria) { + FrameSearchInterface frameSearch = create(); + frameSearch.setCriteria(criteria); + frameSearch.filterByLayer(layer); + return frameSearch; + } - public DatabaseEngine getDbEngine() { - return dbEngine; - } + public DatabaseEngine getDbEngine() { + return dbEngine; + } - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java index 3c17b411e..1d2017f8d 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dao.criteria; @@ -26,28 +24,37 @@ import com.imageworks.spcue.grpc.job.FrameState; public interface FrameSearchInterface extends CriteriaInterface { - int DEFAULT_PAGE = 1; - int DEFAULT_LIMIT = 1000; - - FrameSearchCriteria getCriteria(); - void setCriteria(FrameSearchCriteria criteria); - String getSortedQuery(String query); - void filterByFrameIds(List frameIds); - void filterByJob(JobInterface job); - void filterByFrame(FrameInterface frame); - void filterByLayer(LayerInterface layer); - void filterByLayers(List layers); - void filterByFrameStates(List frameStates); - void filterByFrameSet(String frameSet); - void filterByMemoryRange(String range); - void filterByDurationRange(String range); - void filterByChangeDate(int changeDate); - - static FrameSearchCriteria criteriaFactory() { - return FrameSearchCriteria.newBuilder() - .setPage(DEFAULT_PAGE) - .setLimit(DEFAULT_LIMIT) - .setChangeDate(0) - .build(); - } + int DEFAULT_PAGE = 1; + int DEFAULT_LIMIT = 1000; + + FrameSearchCriteria getCriteria(); + + void setCriteria(FrameSearchCriteria criteria); + + String getSortedQuery(String query); + + void filterByFrameIds(List frameIds); + + void filterByJob(JobInterface job); + + void filterByFrame(FrameInterface frame); + + void filterByLayer(LayerInterface layer); + + void filterByLayers(List layers); + + void filterByFrameStates(List frameStates); + + void filterByFrameSet(String frameSet); + + void filterByMemoryRange(String range); + + void filterByDurationRange(String range); + + void filterByChangeDate(int changeDate); + + static FrameSearchCriteria criteriaFactory() { + return FrameSearchCriteria.newBuilder().setPage(DEFAULT_PAGE).setLimit(DEFAULT_LIMIT) + .setChangeDate(0).build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java index d11093810..31039d917 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * 
Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dao.criteria; @@ -24,23 +22,23 @@ public class HostSearchFactory { - private DatabaseEngine dbEngine; + private DatabaseEngine dbEngine; - public HostSearchInterface create(HostSearchCriteria criteria) { - return new HostSearch(criteria); - } + public HostSearchInterface create(HostSearchCriteria criteria) { + return new HostSearch(criteria); + } - public HostSearchInterface create(AllocationEntity allocEntity) { - HostSearchInterface hostSearch = create(HostSearchInterface.criteriaFactory()); - hostSearch.filterByAlloc(allocEntity); - return hostSearch; - } + public HostSearchInterface create(AllocationEntity allocEntity) { + HostSearchInterface hostSearch = create(HostSearchInterface.criteriaFactory()); + hostSearch.filterByAlloc(allocEntity); + return hostSearch; + } - public DatabaseEngine getDbEngine() { - return dbEngine; - } + public DatabaseEngine getDbEngine() { + return dbEngine; + } - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java index 6ba001210..4d162b238 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dao.criteria; @@ -21,10 +19,11 @@ import com.imageworks.spcue.grpc.host.HostSearchCriteria; public interface HostSearchInterface extends CriteriaInterface { - HostSearchCriteria getCriteria(); - void filterByAlloc(AllocationInterface alloc); + HostSearchCriteria getCriteria(); - static HostSearchCriteria criteriaFactory() { - return HostSearchCriteria.newBuilder().build(); - } + void filterByAlloc(AllocationInterface alloc); + + static HostSearchCriteria criteriaFactory() { + return HostSearchCriteria.newBuilder().build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java index a9468a098..00ac412ba 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
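For orientation, the two HostSearchFactory overloads shown above compose like this. A sketch only: the factory and the AllocationEntity are assumed to come from the Spring context and the allocation DAO, and the com.imageworks.spcue.AllocationEntity import path is an assumption.

    import com.imageworks.spcue.AllocationEntity;
    import com.imageworks.spcue.dao.criteria.HostSearchFactory;
    import com.imageworks.spcue.dao.criteria.HostSearchInterface;

    public class HostSearchSketch {
        // One search built from the empty criteria message, one scoped to an allocation.
        public static void printWhereClauses(HostSearchFactory factory, AllocationEntity alloc) {
            HostSearchInterface byCriteria = factory.create(HostSearchInterface.criteriaFactory());
            HostSearchInterface byAlloc = factory.create(alloc);
            System.out.println(byCriteria.getWhereClause());
            System.out.println(byAlloc.getWhereClause());
        }
    }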
*/ package com.imageworks.spcue.dao.criteria; @@ -23,29 +21,29 @@ import com.imageworks.spcue.dao.criteria.postgres.JobSearch; public class JobSearchFactory { - private DatabaseEngine dbEngine; - - public JobSearchInterface create() { - return new JobSearch(); - } - - public JobSearchInterface create(JobSearchCriteria criteria) { - JobSearchInterface jobSearch = create(); - jobSearch.setCriteria(criteria); - return jobSearch; - } - - public JobSearchInterface create(ShowInterface show) { - JobSearchInterface jobSearch = create(); - jobSearch.filterByShow(show); - return jobSearch; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + private DatabaseEngine dbEngine; + + public JobSearchInterface create() { + return new JobSearch(); + } + + public JobSearchInterface create(JobSearchCriteria criteria) { + JobSearchInterface jobSearch = create(); + jobSearch.setCriteria(criteria); + return jobSearch; + } + + public JobSearchInterface create(ShowInterface show) { + JobSearchInterface jobSearch = create(); + jobSearch.filterByShow(show); + return jobSearch; + } + + public DatabaseEngine getDbEngine() { + return dbEngine; + } + + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java index 63315d11f..8b84891b4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.dao.criteria; @@ -21,13 +19,13 @@ import com.imageworks.spcue.grpc.job.JobSearchCriteria; public interface JobSearchInterface extends CriteriaInterface { - JobSearchCriteria getCriteria(); - void setCriteria(JobSearchCriteria criteria); - void filterByShow(ShowInterface show); + JobSearchCriteria getCriteria(); - static JobSearchCriteria criteriaFactory() { - return JobSearchCriteria.newBuilder() - .setIncludeFinished(false) - .build(); - } + void setCriteria(JobSearchCriteria criteria); + + void filterByShow(ShowInterface show); + + static JobSearchCriteria criteriaFactory() { + return JobSearchCriteria.newBuilder().setIncludeFinished(false).build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java index 5405839eb..c57f43b18 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java @@ -2,53 +2,45 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.criteria; /** - * Describes a simple phrase of a SQL WHERE clause. - * Examples: + * Describes a simple phrase of a SQL WHERE clause. 
Examples: * - * column / comparison/ value - * proc.ts_updated != 123456 - * user.str_name == 'bob' + * column / comparison/ value proc.ts_updated != 123456 user.str_name == 'bob' */ public class Phrase { - private final String column; - private final String comparison; - private final String value; + private final String column; + private final String comparison; + private final String value; - public Phrase(String column, String comparison, String value) { - this.column = column; - this.comparison = comparison; - this.value = value; - } + public Phrase(String column, String comparison, String value) { + this.column = column; + this.comparison = comparison; + this.value = value; + } - public String getColumn() { - return column; - } + public String getColumn() { + return column; + } - public String getComparison() { - return comparison; - } + public String getComparison() { + return comparison; + } - public String getValue() { - return value; - } + public String getValue() { + return value; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java index d754dce13..abd787538 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
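The Phrase value object above is only a (column, comparison, value) triple; the Criteria.addPhrases() helper further down in this patch appends the column, the comparison, and a ? placeholder, binding the value positionally. A small sketch using the column from the Javadoc example:

    import com.imageworks.spcue.dao.criteria.Phrase;

    public class PhraseSketch {
        public static void main(String[] args) {
            // Column, comparison, and the value that will later be bound to the ? placeholder.
            Phrase notUpdatedAt = new Phrase("proc.ts_updated", "!=", "123456");
            System.out.println(notUpdatedAt.getColumn() + " " + notUpdatedAt.getComparison()
                    + " ? [" + notUpdatedAt.getValue() + "]");
        }
    }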
*/ package com.imageworks.spcue.dao.criteria; @@ -22,30 +20,30 @@ import com.imageworks.spcue.grpc.host.ProcSearchCriteria; public class ProcSearchFactory { - private DatabaseEngine dbEngine; - - public ProcSearchInterface create() { - return new ProcSearch(); - } - - public ProcSearchInterface create(ProcSearchCriteria criteria) { - ProcSearchInterface procSearch = create(); - procSearch.setCriteria(criteria); - return procSearch; - } - - public ProcSearchInterface create(ProcSearchCriteria criteria, Sort sort) { - ProcSearchInterface procSearch = create(); - procSearch.setCriteria(criteria); - procSearch.addSort(sort); - return procSearch; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + private DatabaseEngine dbEngine; + + public ProcSearchInterface create() { + return new ProcSearch(); + } + + public ProcSearchInterface create(ProcSearchCriteria criteria) { + ProcSearchInterface procSearch = create(); + procSearch.setCriteria(criteria); + return procSearch; + } + + public ProcSearchInterface create(ProcSearchCriteria criteria, Sort sort) { + ProcSearchInterface procSearch = create(); + procSearch.setCriteria(criteria); + procSearch.addSort(sort); + return procSearch; + } + + public DatabaseEngine getDbEngine() { + return dbEngine; + } + + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java index 5c32e7182..aa092dd09 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.dao.criteria; @@ -28,19 +26,29 @@ import com.imageworks.spcue.grpc.host.ProcSearchCriteria; public interface ProcSearchInterface extends CriteriaInterface { - ProcSearchCriteria getCriteria(); - void setCriteria(ProcSearchCriteria criteria); - void notJobs(List jobs); - void notGroups(List groups); - void filterByDurationRange(LessThanIntegerSearchCriterion criterion); - void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion); - void filterByDurationRange(InRangeIntegerSearchCriterion criterion); - void filterByHost(HostInterface host); - void sortByHostName(); - void sortByDispatchedTime(); - void sortByBookedTime(); - - static ProcSearchCriteria criteriaFactory() { - return ProcSearchCriteria.newBuilder().build(); - } + ProcSearchCriteria getCriteria(); + + void setCriteria(ProcSearchCriteria criteria); + + void notJobs(List jobs); + + void notGroups(List groups); + + void filterByDurationRange(LessThanIntegerSearchCriterion criterion); + + void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion); + + void filterByDurationRange(InRangeIntegerSearchCriterion criterion); + + void filterByHost(HostInterface host); + + void sortByHostName(); + + void sortByDispatchedTime(); + + void sortByBookedTime(); + + static ProcSearchCriteria criteriaFactory() { + return ProcSearchCriteria.newBuilder().build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java index 3c7c8477e..a53a19d77 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java @@ -2,47 +2,42 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
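To show how a search factory, its criteria message, and a Sort fit together, here is a small sketch; the factory instance is assumed to be wired as a Spring bean, and the sort column is invented for illustration.

    import com.imageworks.spcue.dao.criteria.ProcSearchFactory;
    import com.imageworks.spcue.dao.criteria.ProcSearchInterface;
    import com.imageworks.spcue.dao.criteria.Sort;
    import com.imageworks.spcue.grpc.host.ProcSearchCriteria;

    public class ProcSearchSketch {
        // Empty criteria message plus an explicit descending sort, via create(criteria, sort).
        public static ProcSearchInterface newestFirst(ProcSearchFactory factory) {
            ProcSearchCriteria criteria = ProcSearchInterface.criteriaFactory();
            return factory.create(criteria, Sort.desc("proc.ts_dispatched")); // invented column
        }
    }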
*/ - - package com.imageworks.spcue.dao.criteria; public class Sort { - private final String col; - private final Direction dir; + private final String col; + private final Direction dir; - public Sort(String col, Direction dir) { - this.col = col; - this.dir = dir; - } + public Sort(String col, Direction dir) { + this.col = col; + this.dir = dir; + } - public static final Sort asc(String col) { - return new Sort(col, Direction.ASC); - } + public static final Sort asc(String col) { + return new Sort(col, Direction.ASC); + } - public static final Sort desc(String col) { - return new Sort(col, Direction.DESC); - } + public static final Sort desc(String col) { + return new Sort(col, Direction.DESC); + } - public String getColumn() { - return this.col; - } + public String getColumn() { + return this.col; + } - public Direction getDirection() { - return this.dir; - } + public Direction getDirection() { + return this.dir; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java index d4d6a6396..904fcfcd8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
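Because the row_number() paging in the Criteria base class reformatted just below is easiest to follow end to end, here is a minimal, illustrative subclass that is not part of this patch: the base query and the sort column are invented, while addPhrase, addSort, setFirstResult, setMaxResults, and getFilteredQuery are the members defined in that class.

    package com.imageworks.spcue.dao.criteria.postgres;

    import com.imageworks.spcue.dao.criteria.Sort;

    // Illustrative subclass: one WHERE chunk, one sort, and a 101-150 result window.
    public class PagingCriteriaSketch extends Criteria {
        private final String jobId;

        public PagingCriteriaSketch(String jobId) {
            this.jobId = jobId;
        }

        @Override
        void buildWhereClause() {
            // Adds "(job.pk_job=?)" to the chunk list and binds jobId as a positional value.
            addPhrase("job.pk_job", jobId);
        }

        public static void main(String[] args) {
            PagingCriteriaSketch criteria = new PagingCriteriaSketch("example-job-id");
            criteria.addSort(Sort.desc("job.ts_started")); // invented column name
            criteria.setFirstResult(101);
            criteria.setMaxResults(50);
            // The base query ends in WHERE 1=1 so the generated "AND (...)" chunk attaches cleanly.
            System.out.println(criteria.getFilteredQuery("SELECT job.pk_job FROM job WHERE 1=1 "));
            System.out.println(criteria.getValues()); // [example-job-id, 101, 151]
        }
    }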
*/ package com.imageworks.spcue.dao.criteria.postgres; @@ -40,272 +38,288 @@ public abstract class Criteria implements CriteriaInterface { - List chunks = new ArrayList(12); - List values = new ArrayList(32); - Integer limit; + List chunks = new ArrayList(12); + List values = new ArrayList(32); + Integer limit; - boolean built = false; - private int firstResult = 1; - private int maxResults = 0; - private ArrayList order = new ArrayList(); + boolean built = false; + private int firstResult = 1; + private int maxResults = 0; + private ArrayList order = new ArrayList(); - abstract void buildWhereClause(); + abstract void buildWhereClause(); - public String toString() { return this.getWhereClause(); } + public String toString() { + return this.getWhereClause(); + } - public void setFirstResult(int firstResult) { - this.firstResult = Math.max(firstResult, 1); - } + public void setFirstResult(int firstResult) { + this.firstResult = Math.max(firstResult, 1); + } - public void setMaxResults(int maxResults) { - this.maxResults = maxResults; - } + public void setMaxResults(int maxResults) { + this.maxResults = maxResults; + } - public void addSort(Sort sort) { - this.order.add(sort); - } + public void addSort(Sort sort) { + this.order.add(sort); + } - public List getValues() { - return values; - } + public List getValues() { + return values; + } - public Object[] getValuesArray() { - return values.toArray(); - } + public Object[] getValuesArray() { + return values.toArray(); + } - public String getWhereClause() { - build(); - return generateWhereClause(); - } + public String getWhereClause() { + build(); + return generateWhereClause(); + } - public String getFilteredQuery(String query) { - build(); - return queryWithPaging(query); - } + public String getFilteredQuery(String query) { + build(); + return queryWithPaging(query); + } - private void build() { - if (!built) { - buildWhereClause(); - } - built = true; + private void build() { + if (!built) { + buildWhereClause(); } - - private String generateWhereClause() { - return chunks.stream() - .map(StringBuilder::toString) - .collect(Collectors.joining(" AND ")); + built = true; + } + + private String generateWhereClause() { + return chunks.stream().map(StringBuilder::toString).collect(Collectors.joining(" AND ")); + } + + private String queryWithPaging(String query) { + if (firstResult > 1 || maxResults > 0) { + if (order.size() == 0) { + query = query.replaceFirst("SELECT ", "SELECT row_number() OVER () AS RN,"); + } else { + query = + query.replaceFirst("SELECT ", "SELECT row_number() OVER (" + getOrder() + ") AS RN, "); + } } - private String queryWithPaging(String query) { - if (firstResult > 1 || maxResults > 0) { - if (order.size() == 0) { - query = query.replaceFirst("SELECT ", "SELECT row_number() OVER () AS RN,"); - } else { - query = query.replaceFirst("SELECT ", "SELECT row_number() OVER (" + getOrder() + ") AS RN, "); - } - } - - StringBuilder sb = new StringBuilder(4096); - if (maxResults > 0 || firstResult > 1) { - sb.append("SELECT * FROM ( "); - } - - sb.append(query); - sb.append(" "); - if (chunks.size() > 0) { - sb.append("AND "); - sb.append( - chunks.stream() - .map(StringBuilder::toString) - .collect(Collectors.joining(" AND "))); - } - - if (firstResult > 1 || maxResults > 0) { - sb.append(") AS getQueryT WHERE "); - } - - if (firstResult > 1) { - sb.append (" RN >= ? "); - values.add(firstResult); - } - - if (maxResults > 0) { - if (firstResult > 1) { - sb.append(" AND "); - } - sb.append(" RN < ? 
"); - values.add(firstResult + maxResults); - } - - if (limit != null) { - sb.append(" LIMIT "); - sb.append(limit); - sb.append(" "); - } - - return sb.toString(); + StringBuilder sb = new StringBuilder(4096); + if (maxResults > 0 || firstResult > 1) { + sb.append("SELECT * FROM ( "); } - private String getOrder() { - if (order.size() < 1) { - return ""; - } - return " ORDER BY " + order.stream() - .map(sort -> sort.getColumn() + " " + sort.getDirection().toString()) - .collect(Collectors.joining(", ")); + sb.append(query); + sb.append(" "); + if (chunks.size() > 0) { + sb.append("AND "); + sb.append(chunks.stream().map(StringBuilder::toString).collect(Collectors.joining(" AND "))); } - void addPhrase(String col, Collection s) { - if (s == null || s.size() == 0) { return; } - - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w: s) { - sb.append(col); - sb.append("=?"); - sb.append(" OR "); - values.add(w); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); + if (firstResult > 1 || maxResults > 0) { + sb.append(") AS getQueryT WHERE "); } - void addPhrases(Collection phrases, String inclusion) { - if (phrases.size() == 0) { return; } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (Phrase p: phrases) { - sb.append(p.getColumn()); - sb.append(p.getComparison()); - sb.append("?"); - sb.append(" "); - sb.append(inclusion); - sb.append(" "); - values.add(p.getValue()); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); + if (firstResult > 1) { + sb.append(" RN >= ? "); + values.add(firstResult); } - void addPhrase(String col, String v) { - if (v == null) { return; } - addPhrase(col, ImmutableList.of(v)); + if (maxResults > 0) { + if (firstResult > 1) { + sb.append(" AND "); + } + sb.append(" RN < ? 
"); + values.add(firstResult + maxResults); } - void addRegexPhrase(String col, Set s) { - if (s == null) { return; } - if (s.size() == 0) { return; } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w: s) { - sb.append(String.format("%s ~ ?", col)); - sb.append(" OR "); - values.add(w); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); + if (limit != null) { + sb.append(" LIMIT "); + sb.append(limit); + sb.append(" "); } - void addLikePhrase(String col, Set s) { - if (s == null) { return; } - if (s.size() == 0) { return; } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w: s) { - sb.append(col); - sb.append(" LIKE ?"); - sb.append(" OR "); - values.add("%" + w + "%"); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); - } + return sb.toString(); + } - void addGreaterThanTimestamp(String col, Timestamp timestamp) { - if (timestamp == null) { return; } - StringBuilder sb = new StringBuilder(128); - sb.append("("); - sb.append(col); - sb.append(" > ?"); - sb.append(") "); - values.add(timestamp); - chunks.add(sb); + private String getOrder() { + if (order.size() < 1) { + return ""; } - - void addLessThanTimestamp(String col, Timestamp timestamp) { - if (timestamp == null) { return; } - StringBuilder sb = new StringBuilder(128); - sb.append("("); - sb.append(col); - sb.append(" < ?"); - sb.append(") "); - values.add(timestamp); - chunks.add(sb); + return " ORDER BY " + + order.stream().map(sort -> sort.getColumn() + " " + sort.getDirection().toString()) + .collect(Collectors.joining(", ")); + } + + void addPhrase(String col, Collection s) { + if (s == null || s.size() == 0) { + return; } - void addRangePhrase(String col, EqualsIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " = ?"); - chunks.add(sb); - values.add(criterion.getValue()); + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (String w : s) { + sb.append(col); + sb.append("=?"); + sb.append(" OR "); + values.add(w); } - - void addRangePhrase(String col, LessThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + "<=? "); - chunks.add(sb); - values.add(criterion.getValue()); + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); + } + + void addPhrases(Collection phrases, String inclusion) { + if (phrases.size() == 0) { + return; } - - void addRangePhrase(String col, GreaterThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getValue()); + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (Phrase p : phrases) { + sb.append(p.getColumn()); + sb.append(p.getComparison()); + sb.append("?"); + sb.append(" "); + sb.append(inclusion); + sb.append(" "); + values.add(p.getValue()); } - - void addRangePhrase(String col, InRangeIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? AND " + col + " <= ? 
"); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); + } + + void addPhrase(String col, String v) { + if (v == null) { + return; } + addPhrase(col, ImmutableList.of(v)); + } - void addRangePhrase(String col, EqualsFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " = ?"); - chunks.add(sb); - values.add(criterion.getValue()); + void addRegexPhrase(String col, Set s) { + if (s == null) { + return; } - - void addRangePhrase(String col, LessThanFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " <= ? "); - chunks.add(sb); - values.add(criterion.getValue()); + if (s.size() == 0) { + return; } - - void addRangePhrase(String col, GreaterThanFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getValue()); + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (String w : s) { + sb.append(String.format("%s ~ ?", col)); + sb.append(" OR "); + values.add(w); } - - void addRangePhrase(String col, InRangeFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); + } + + void addLikePhrase(String col, Set s) { + if (s == null) { + return; } - - boolean isValid(String v) { - return v != null && !v.isEmpty(); + if (s.size() == 0) { + return; + } + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (String w : s) { + sb.append(col); + sb.append(" LIKE ?"); + sb.append(" OR "); + values.add("%" + w + "%"); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); + } + + void addGreaterThanTimestamp(String col, Timestamp timestamp) { + if (timestamp == null) { + return; + } + StringBuilder sb = new StringBuilder(128); + sb.append("("); + sb.append(col); + sb.append(" > ?"); + sb.append(") "); + values.add(timestamp); + chunks.add(sb); + } + + void addLessThanTimestamp(String col, Timestamp timestamp) { + if (timestamp == null) { + return; } + StringBuilder sb = new StringBuilder(128); + sb.append("("); + sb.append(col); + sb.append(" < ?"); + sb.append(") "); + values.add(timestamp); + chunks.add(sb); + } + + void addRangePhrase(String col, EqualsIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " = ?"); + chunks.add(sb); + values.add(criterion.getValue()); + } + + void addRangePhrase(String col, LessThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + "<=? "); + chunks.add(sb); + values.add(criterion.getValue()); + } + + void addRangePhrase(String col, GreaterThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? "); + chunks.add(sb); + values.add(criterion.getValue()); + } + + void addRangePhrase(String col, InRangeIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? AND " + col + " <= ? 
"); + chunks.add(sb); + values.add(criterion.getMin()); + values.add(criterion.getMax()); + } + + void addRangePhrase(String col, EqualsFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " = ?"); + chunks.add(sb); + values.add(criterion.getValue()); + } + + void addRangePhrase(String col, LessThanFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " <= ? "); + chunks.add(sb); + values.add(criterion.getValue()); + } + + void addRangePhrase(String col, GreaterThanFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? "); + chunks.add(sb); + values.add(criterion.getValue()); + } + + void addRangePhrase(String col, InRangeFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? "); + chunks.add(sb); + values.add(criterion.getMin()); + values.add(criterion.getMax()); + } + + boolean isValid(String v) { + return v != null && !v.isEmpty(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java index de33e29cd..198e0899d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.dao.criteria.postgres; @@ -36,185 +34,189 @@ import com.imageworks.spcue.util.FrameSet; public class FrameSearch extends Criteria implements FrameSearchInterface { - private static final int MAX_RESULTS = 1000; - private static final Logger logger = LogManager.getLogger(FrameSearch.class); - private static final Pattern PATTERN_SINGLE_FRAME = Pattern.compile("^([0-9]+)$"); - private static final Pattern PATTERN_RANGE = Pattern.compile("^([0-9]+)\\-([0-9]+)$"); - private static final Pattern PATTERN_FLOAT_RANGE = Pattern.compile("^([0-9\\.]+)\\-([0-9\\.]+)$"); - private static final int RANGE_MAX_SIZE = 1000; - - private FrameSearchCriteria criteria; - private String sortedQuery; - - public FrameSearch() { - criteria = FrameSearchInterface.criteriaFactory(); + private static final int MAX_RESULTS = 1000; + private static final Logger logger = LogManager.getLogger(FrameSearch.class); + private static final Pattern PATTERN_SINGLE_FRAME = Pattern.compile("^([0-9]+)$"); + private static final Pattern PATTERN_RANGE = Pattern.compile("^([0-9]+)\\-([0-9]+)$"); + private static final Pattern PATTERN_FLOAT_RANGE = Pattern.compile("^([0-9\\.]+)\\-([0-9\\.]+)$"); + private static final int RANGE_MAX_SIZE = 1000; + + private FrameSearchCriteria criteria; + private String sortedQuery; + + public FrameSearch() { + criteria = FrameSearchInterface.criteriaFactory(); + } + + @Override + public FrameSearchCriteria getCriteria() { + return criteria; + } + + @Override + public void setCriteria(FrameSearchCriteria criteria) { + this.criteria = criteria; + } + + @Override + public String getSortedQuery(String query) { + if (built) { + return sortedQuery; } - @Override - public FrameSearchCriteria getCriteria() { - return criteria; - } - - @Override - public void setCriteria(FrameSearchCriteria criteria) { - this.criteria = criteria; - } - - @Override - public String getSortedQuery(String query) { - if (built) { - return sortedQuery; - } - - int limit = criteria.getLimit(); - int page = criteria.getPage(); - - if (limit <= 0 || limit >= MAX_RESULTS) { - criteria = criteria.toBuilder().setLimit(MAX_RESULTS).build(); - } - if (page <= 0) { - page = 1; - } - - StringBuilder sb = new StringBuilder(query.length() + 256); - sb.append("SELECT * FROM ("); - sb.append(getFilteredQuery(query)); - sb.append(" ) AS getSortedQueryT WHERE row_number > ?"); - sb.append(" AND row_number <= ?"); - values.add((page - 1) * limit); - values.add(page * limit); - sortedQuery = sb.toString(); - return sortedQuery; - } - - @Override - public void filterByFrameIds(List frameIds) { - criteria = criteria.toBuilder().addAllIds(frameIds).build(); - } + int limit = criteria.getLimit(); + int page = criteria.getPage(); - @Override - public void filterByJob(JobInterface job) { - addPhrase("job.pk_job", job.getJobId()); + if (limit <= 0 || limit >= MAX_RESULTS) { + criteria = criteria.toBuilder().setLimit(MAX_RESULTS).build(); } - - @Override - public void filterByFrame(FrameInterface frame) { - filterByFrameIds(ImmutableList.of(frame.getFrameId())); + if (page <= 0) { + page = 1; } - @Override - public void filterByLayer(LayerInterface layer) { - addPhrase("layer.pk_layer", layer.getLayerId()); + StringBuilder sb = new StringBuilder(query.length() + 256); + sb.append("SELECT * FROM ("); + sb.append(getFilteredQuery(query)); + sb.append(" ) AS getSortedQueryT WHERE row_number > ?"); + sb.append(" AND row_number <= ?"); + values.add((page - 1) * limit); + values.add(page * limit); + sortedQuery = sb.toString(); + 
return sortedQuery; + } + + @Override + public void filterByFrameIds(List frameIds) { + criteria = criteria.toBuilder().addAllIds(frameIds).build(); + } + + @Override + public void filterByJob(JobInterface job) { + addPhrase("job.pk_job", job.getJobId()); + } + + @Override + public void filterByFrame(FrameInterface frame) { + filterByFrameIds(ImmutableList.of(frame.getFrameId())); + } + + @Override + public void filterByLayer(LayerInterface layer) { + addPhrase("layer.pk_layer", layer.getLayerId()); + } + + @Override + public void filterByLayers(List layers) { + addPhrase("layer.pk_layer", + layers.stream().map(LayerInterface::getLayerId).collect(Collectors.toList())); + } + + @Override + public void filterByFrameStates(List frameStates) { + addPhrase("frame.str_state", + frameStates.stream().map(FrameState::toString).collect(Collectors.toSet())); + } + + @Override + public void filterByFrameSet(String frameSet) { + StringBuilder sb = new StringBuilder(8096); + Matcher matchRange = PATTERN_RANGE.matcher(frameSet); + Matcher matchSingle = PATTERN_SINGLE_FRAME.matcher(frameSet); + + if (matchSingle.matches()) { + sb.append("frame.int_number=?"); + values.add(Integer.valueOf(matchSingle.group(1))); + } else if (matchRange.matches()) { + sb.append(" ( frame.int_number >= ? AND "); + sb.append(" frame.int_number <= ? )"); + values.add(Integer.valueOf(matchRange.group(1))); + values.add(Integer.valueOf(matchRange.group(2))); + } else { + FrameSet set = new FrameSet(frameSet); + int num_frames = set.size(); + if (num_frames <= RANGE_MAX_SIZE) { + sb.append("("); + for (int i = 0; i < num_frames; i++) { + sb.append("frame.int_number=? OR "); + values.add(set.get(i)); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(") "); + } } - - @Override - public void filterByLayers(List layers) { - addPhrase( - "layer.pk_layer", - layers.stream().map(LayerInterface::getLayerId).collect(Collectors.toList())); + chunks.add(sb); + } + + @Override + public void filterByMemoryRange(String range) { + StringBuilder sb = new StringBuilder(128); + Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); + try { + if (matchRange.matches()) { + values.add(CueUtil.GB * Float.valueOf(matchRange.group(1))); + values.add(CueUtil.GB * Float.valueOf(matchRange.group(2))); + sb.append(" (frame.int_mem_max_used >= ? AND frame.int_mem_max_used <= ?) "); + } else { + values.add(CueUtil.GB * Float.valueOf(range)); + sb.append(" frame.int_mem_max_used >= ? "); + } + } catch (RuntimeException e) { + logger.warn("Failed to convert float range: " + range + "," + e); } - - @Override - public void filterByFrameStates(List frameStates) { - addPhrase( - "frame.str_state", - frameStates.stream().map(FrameState::toString).collect(Collectors.toSet())); + chunks.add(sb); + } + + @Override + public void filterByDurationRange(String range) { + StringBuilder sb = new StringBuilder(128); + Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); + try { + if (matchRange.matches()) { + values.add((int) (3600 * Float.valueOf(matchRange.group(1)))); + values.add((int) (3600 * Float.valueOf(matchRange.group(2)))); + sb.append(" (frame.str_state != 'WAITING' "); + sb.append(" AND find_duration(frame.ts_started, frame.ts_stopped) "); + sb.append(" BETWEEN ? AND ? )"); + } else { + values.add((int) (3600 * Float.valueOf(range))); + sb.append(" (frame.str_state != 'WAITING' AND "); + sb.append("find_duration(frame.ts_started, frame.ts_stopped) >= ?) 
"); + } + } catch (RuntimeException e) { + logger.warn("Failed to convert float range: " + range + "," + e); + // a cast failed, ignore for now. } - - @Override - public void filterByFrameSet(String frameSet) { - StringBuilder sb = new StringBuilder(8096); - Matcher matchRange = PATTERN_RANGE.matcher(frameSet); - Matcher matchSingle = PATTERN_SINGLE_FRAME.matcher(frameSet); - - if (matchSingle.matches()) { - sb.append("frame.int_number=?"); - values.add(Integer.valueOf(matchSingle.group(1))); - } else if (matchRange.matches()) { - sb.append(" ( frame.int_number >= ? AND "); - sb.append(" frame.int_number <= ? )"); - values.add(Integer.valueOf(matchRange.group(1))); - values.add(Integer.valueOf(matchRange.group(2))); - } else { - FrameSet set = new FrameSet(frameSet); - int num_frames = set.size(); - if (num_frames <= RANGE_MAX_SIZE) { - sb.append("("); - for (int i=0; i ?"); + chunks.add(sb); + values.add(new java.sql.Timestamp(changeDate * 1000L)); + } + + @Override + void buildWhereClause() { + addPhrase("frame.pk_frame", criteria.getIdsList()); + + addPhrase("frame.str_name", criteria.getFramesList()); + addPhrase("layer.str_name", criteria.getLayersList()); + filterByFrameStates(criteria.getStates().getFrameStatesList()); + if (isValid(criteria.getFrameRange())) { + filterByFrameSet(criteria.getFrameRange()); } - - @Override - public void filterByMemoryRange(String range) { - StringBuilder sb = new StringBuilder(128); - Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); - try { - if (matchRange.matches()) { - values.add(CueUtil.GB * Float.valueOf(matchRange.group(1))); - values.add(CueUtil.GB * Float.valueOf(matchRange.group(2))); - sb.append(" (frame.int_mem_max_used >= ? AND frame.int_mem_max_used <= ?) "); - } - else { - values.add(CueUtil.GB * Float.valueOf(range)); - sb.append(" frame.int_mem_max_used >= ? "); - } - } catch (RuntimeException e) { - logger.warn("Failed to convert float range: " + range + "," + e); - } - chunks.add(sb); - } - - @Override - public void filterByDurationRange(String range) { - StringBuilder sb = new StringBuilder(128); - Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); - try { - if (matchRange.matches()) { - values.add((int) (3600 * Float.valueOf(matchRange.group(1)))); - values.add((int) (3600 * Float.valueOf(matchRange.group(2)))); - sb.append(" (frame.str_state != 'WAITING' "); - sb.append(" AND find_duration(frame.ts_started, frame.ts_stopped) "); - sb.append(" BETWEEN ? AND ? )"); - } - else { - values.add((int) (3600 * Float.valueOf(range))); - sb.append(" (frame.str_state != 'WAITING' AND "); - sb.append("find_duration(frame.ts_started, frame.ts_stopped) >= ?) "); - } - } catch (RuntimeException e) { - logger.warn("Failed to convert float range: " + range + "," + e); - // a cast failed, ignore for now. 
- } - System.out.println(sb.toString()); - System.out.println(values); - chunks.add(sb); + if (isValid(criteria.getMemoryRange())) { + filterByMemoryRange(criteria.getMemoryRange()); } - - @Override - public void filterByChangeDate(int changeDate) { - StringBuilder sb = new StringBuilder(); - sb.append("frame.ts_updated > ?"); - chunks.add(sb); - values.add(new java.sql.Timestamp( changeDate * 1000L)); + if (isValid(criteria.getDurationRange())) { + filterByDurationRange(criteria.getDurationRange()); } - - @Override - void buildWhereClause() { - addPhrase("frame.pk_frame", criteria.getIdsList()); - - addPhrase("frame.str_name", criteria.getFramesList()); - addPhrase("layer.str_name", criteria.getLayersList()); - filterByFrameStates(criteria.getStates().getFrameStatesList()); - if (isValid(criteria.getFrameRange())) { filterByFrameSet(criteria.getFrameRange()); } - if (isValid(criteria.getMemoryRange())) { filterByMemoryRange(criteria.getMemoryRange()); } - if (isValid(criteria.getDurationRange())) { filterByDurationRange(criteria.getDurationRange()); } - if (criteria.getChangeDate() > 0) { filterByChangeDate(criteria.getChangeDate()); } + if (criteria.getChangeDate() > 0) { + filterByChangeDate(criteria.getChangeDate()); } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java index 9e84d3ed0..f7a2dc63e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
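
getSortedQuery above turns the criteria's (limit, page) pair into a row_number window: the wrapped query keeps rows strictly greater than (page - 1) * limit and at most page * limit, with the limit clamped to MAX_RESULTS. A standalone sketch of that arithmetic; the inner query and names below are illustrative, not the real Cuebot SQL.

import java.util.ArrayList;
import java.util.List;

public class PagedQuerySketch {
  private static final int MAX_RESULTS = 1000; // same clamp as FrameSearch above

  static String paged(String filteredQuery, int page, int limit, List<Object> binds) {
    if (limit <= 0 || limit >= MAX_RESULTS) {
      limit = MAX_RESULTS;
    }
    if (page <= 0) {
      page = 1;
    }
    binds.add((page - 1) * limit); // exclusive lower bound: rows already paged past
    binds.add(page * limit);       // inclusive upper bound: last row on this page
    return "SELECT * FROM (" + filteredQuery
        + " ) AS getSortedQueryT WHERE row_number > ? AND row_number <= ?";
  }

  public static void main(String[] args) {
    List<Object> binds = new ArrayList<>();
    String inner =
        "SELECT frame.*, row_number() OVER (ORDER BY frame.int_number) AS row_number FROM frame";
    System.out.println(paged(inner, 3, 100, binds));
    System.out.println(binds); // [200, 300] -> page 3 is rows 201..300
  }
}
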
*/ package com.imageworks.spcue.dao.criteria.postgres; @@ -26,31 +24,31 @@ import com.imageworks.spcue.grpc.host.HostSearchCriteria; public class HostSearch extends Criteria implements HostSearchInterface { - private HostSearchCriteria criteria; - - public HostSearch(HostSearchCriteria criteria) { - this.criteria = criteria; - } - - public HostSearchCriteria getCriteria() { - return this.criteria; - } - - public void filterByAlloc(AllocationInterface alloc) { - addPhrase("host.pk_alloc", alloc.getAllocationId()); - } - - @Override - public void buildWhereClause() { - addPhrase("host.pk_host", criteria.getIdsList()); - addPhrase("host.str_name", criteria.getHostsList()); - addLikePhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); - addRegexPhrase("host.str_name", new HashSet<>(criteria.getRegexList())); - addPhrase("alloc.str_name", criteria.getAllocsList()); - Set items = new HashSet<>(criteria.getStates().getStateCount()); - for (HardwareState w: criteria.getStates().getStateList()) { - items.add(w.toString()); - } - addPhrase("host_stat.str_state", items); + private HostSearchCriteria criteria; + + public HostSearch(HostSearchCriteria criteria) { + this.criteria = criteria; + } + + public HostSearchCriteria getCriteria() { + return this.criteria; + } + + public void filterByAlloc(AllocationInterface alloc) { + addPhrase("host.pk_alloc", alloc.getAllocationId()); + } + + @Override + public void buildWhereClause() { + addPhrase("host.pk_host", criteria.getIdsList()); + addPhrase("host.str_name", criteria.getHostsList()); + addLikePhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); + addRegexPhrase("host.str_name", new HashSet<>(criteria.getRegexList())); + addPhrase("alloc.str_name", criteria.getAllocsList()); + Set items = new HashSet<>(criteria.getStates().getStateCount()); + for (HardwareState w : criteria.getStates().getStateList()) { + items.add(w.toString()); } + addPhrase("host_stat.str_state", items); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java index fa62794bd..b69545791 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.dao.criteria.postgres; @@ -24,38 +22,38 @@ import com.imageworks.spcue.grpc.job.JobSearchCriteria; public final class JobSearch extends Criteria implements JobSearchInterface { - private JobSearchCriteria criteria; - - public JobSearch() { - criteria = JobSearchInterface.criteriaFactory(); - } - - @Override - public JobSearchCriteria getCriteria() { - return criteria; - } - - @Override - public void setCriteria(JobSearchCriteria criteria) { - this.criteria = criteria; - } - - @Override - public void filterByShow(ShowInterface show) { - addPhrase("job.pk_show", show.getShowId()); - } - - @Override - void buildWhereClause() { - addPhrase("job.pk_job", criteria.getIdsList()); - addPhrase("job.str_name", criteria.getJobsList()); - addLikePhrase("job.str_name", new HashSet<>(criteria.getSubstrList())); - addRegexPhrase("job.str_name", new HashSet<>(criteria.getRegexList())); - addPhrase("job.str_shot", criteria.getShotsList()); - addPhrase("show.str_name", criteria.getShowsList()); - addPhrase("job.str_user", criteria.getUsersList()); - if (!criteria.getIncludeFinished()) { - addPhrase("job.str_state", "PENDING"); - } + private JobSearchCriteria criteria; + + public JobSearch() { + criteria = JobSearchInterface.criteriaFactory(); + } + + @Override + public JobSearchCriteria getCriteria() { + return criteria; + } + + @Override + public void setCriteria(JobSearchCriteria criteria) { + this.criteria = criteria; + } + + @Override + public void filterByShow(ShowInterface show) { + addPhrase("job.pk_show", show.getShowId()); + } + + @Override + void buildWhereClause() { + addPhrase("job.pk_job", criteria.getIdsList()); + addPhrase("job.str_name", criteria.getJobsList()); + addLikePhrase("job.str_name", new HashSet<>(criteria.getSubstrList())); + addRegexPhrase("job.str_name", new HashSet<>(criteria.getRegexList())); + addPhrase("job.str_shot", criteria.getShotsList()); + addPhrase("show.str_name", criteria.getShowsList()); + addPhrase("job.str_user", criteria.getUsersList()); + if (!criteria.getIncludeFinished()) { + addPhrase("job.str_state", "PENDING"); } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java index 294d5c4d8..038de725a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dao.criteria.postgres; @@ -34,102 +32,102 @@ public class ProcSearch extends Criteria implements ProcSearchInterface { - private ProcSearchCriteria criteria; - private Set notJobs = new HashSet<>(); - private Set notGroups = new HashSet<>(); + private ProcSearchCriteria criteria; + private Set notJobs = new HashSet<>(); + private Set notGroups = new HashSet<>(); - public ProcSearch() { - criteria = ProcSearchInterface.criteriaFactory(); - } - - public ProcSearchCriteria getCriteria() { - return criteria; - } - - public void setCriteria(ProcSearchCriteria criteria) { - this.criteria = criteria; - } + public ProcSearch() { + criteria = ProcSearchInterface.criteriaFactory(); + } - public void notJobs(List jobs) { - for (JobInterface job: jobs) { - notJobs.add(new Phrase("proc.pk_job","!=", job.getJobId())); - } - } + public ProcSearchCriteria getCriteria() { + return criteria; + } - public void notGroups(List groups) { - for (GroupInterface group: groups) { - notGroups.add(new Phrase("folder.pk_folder","!=", group.getGroupId())); - } - } + public void setCriteria(ProcSearchCriteria criteria) { + this.criteria = criteria; + } - public void filterByDurationRange(LessThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) <= ?) "); - chunks.add(sb); - values.add(criterion.getValue()); + public void notJobs(List jobs) { + for (JobInterface job : jobs) { + notJobs.add(new Phrase("proc.pk_job", "!=", job.getJobId())); } + } - public void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) >= ?) "); - chunks.add(sb); - values.add(criterion.getValue()); + public void notGroups(List groups) { + for (GroupInterface group : groups) { + notGroups.add(new Phrase("folder.pk_folder", "!=", group.getGroupId())); } - - public void filterByDurationRange(InRangeIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) BETWEEN ? AND ? )"); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - public void filterByHost(HostInterface host) { - addPhrase("host.pk_host", host.getHostId()); + } + + public void filterByDurationRange(LessThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" (find_duration(proc.ts_dispatched, null) <= ?) "); + chunks.add(sb); + values.add(criterion.getValue()); + } + + public void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" (find_duration(proc.ts_dispatched, null) >= ?) "); + chunks.add(sb); + values.add(criterion.getValue()); + } + + public void filterByDurationRange(InRangeIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" (find_duration(proc.ts_dispatched, null) BETWEEN ? AND ? 
)"); + chunks.add(sb); + values.add(criterion.getMin()); + values.add(criterion.getMax()); + } + + public void filterByHost(HostInterface host) { + addPhrase("host.pk_host", host.getHostId()); + } + + public void sortByHostName() { + addSort(Sort.asc("host.str_name")); + } + + public void sortByDispatchedTime() { + addSort(Sort.asc("proc.ts_dispatched")); + } + + public void sortByBookedTime() { + addSort(Sort.asc("proc.ts_booked")); + } + + @Override + void buildWhereClause() { + addPhrases(notJobs, "AND"); + addPhrases(notGroups, "AND"); + + addPhrase("host.str_name", criteria.getHostsList()); + addPhrase("job.str_name", criteria.getJobsList()); + addPhrase("layer.str_name", criteria.getLayersList()); + addPhrase("show.str_name", criteria.getShowsList()); + addPhrase("alloc.str_name", criteria.getAllocsList()); + + if (criteria.getMemoryRangeCount() > 0) { + addRangePhrase("proc.int_mem_reserved", criteria.getMemoryRange(0)); } - public void sortByHostName() { - addSort(Sort.asc("host.str_name")); + if (criteria.getMemoryLessThanCount() > 0) { + addRangePhrase("proc.int_mem_reserved", criteria.getMemoryLessThan(0)); } - public void sortByDispatchedTime() { - addSort(Sort.asc("proc.ts_dispatched")); + if (criteria.getMemoryGreaterThanCount() > 0) { + addRangePhrase("proc.int_mem_reserved", criteria.getMemoryGreaterThan(0)); } - public void sortByBookedTime() { - addSort(Sort.asc("proc.ts_booked")); + if (criteria.getDurationRangeCount() > 0) { + filterByDurationRange(criteria.getDurationRange(0)); } - @Override - void buildWhereClause() { - addPhrases(notJobs, "AND"); - addPhrases(notGroups, "AND"); - - addPhrase("host.str_name", criteria.getHostsList()); - addPhrase("job.str_name", criteria.getJobsList()); - addPhrase("layer.str_name", criteria.getLayersList()); - addPhrase("show.str_name", criteria.getShowsList()); - addPhrase("alloc.str_name", criteria.getAllocsList()); - - if (criteria.getMemoryRangeCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryRange(0)); - } - - if (criteria.getMemoryLessThanCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryLessThan(0)); - } - - if (criteria.getMemoryGreaterThanCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryGreaterThan(0)); - } - - if (criteria.getDurationRangeCount() > 0) { - filterByDurationRange(criteria.getDurationRange(0)); - } - - setFirstResult(criteria.getFirstResult()); - if (criteria.getMaxResultsCount() > 0) { - setMaxResults(criteria.getMaxResults(0)); - } + setFirstResult(criteria.getFirstResult()); + if (criteria.getMaxResultsCount() > 0) { + setMaxResults(criteria.getMaxResults(0)); } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java index 38b1c5c32..96059b6a1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -36,129 +32,113 @@ import com.imageworks.spcue.grpc.filter.ActionValueType; import com.imageworks.spcue.util.SqlUtil; -public class ActionDaoJdbc extends JdbcDaoSupport implements ActionDao { - - public static final String INSERT_ACTION = - "INSERT INTO " + - "action " + - "(" + - "pk_action,pk_filter,str_action,str_value_type,b_stop" + - ") VALUES (?,?,?,?,?)"; - - public void createAction(ActionEntity action) { - action.id = SqlUtil.genKeyRandom(); - boolean stopAction = ActionType.STOP_PROCESSING.equals(action.type); - getJdbcTemplate().update(INSERT_ACTION, - action.id, action.filterId,action.type.toString(), - action.valueType.toString(), stopAction); - updateAction(action); - } - - private static final String GET_ACTION = - "SELECT "+ - "action.*," + - "filter.pk_show "+ - "FROM " + - "action,"+ - "filter " + - "WHERE " + - "action.pk_filter = filter.pk_filter"; - - public ActionEntity getAction(String id) { - return getJdbcTemplate().queryForObject( - GET_ACTION + " AND pk_action=?", - ACTION_DETAIL_MAPPER, id); +public class ActionDaoJdbc extends JdbcDaoSupport implements ActionDao { + + public static final String INSERT_ACTION = "INSERT INTO " + "action " + "(" + + "pk_action,pk_filter,str_action,str_value_type,b_stop" + ") VALUES (?,?,?,?,?)"; + + public void createAction(ActionEntity action) { + action.id = SqlUtil.genKeyRandom(); + boolean stopAction = ActionType.STOP_PROCESSING.equals(action.type); + getJdbcTemplate().update(INSERT_ACTION, action.id, action.filterId, action.type.toString(), + action.valueType.toString(), stopAction); + updateAction(action); + } + + private static final String GET_ACTION = "SELECT " + "action.*," + "filter.pk_show " + "FROM " + + "action," + "filter " + "WHERE " + "action.pk_filter = filter.pk_filter"; + + public ActionEntity getAction(String id) { + return getJdbcTemplate().queryForObject(GET_ACTION + " AND pk_action=?", ACTION_DETAIL_MAPPER, + id); + } + + public ActionEntity getAction(ActionInterface action) { + return getJdbcTemplate().queryForObject(GET_ACTION + " AND pk_action=?", ACTION_DETAIL_MAPPER, + action.getActionId()); + } + + public List getActions(FilterInterface filter) { + return getJdbcTemplate().query( + GET_ACTION + " AND filter.pk_filter=? 
ORDER BY b_stop ASC, ts_created ASC", + ACTION_DETAIL_MAPPER, filter.getFilterId()); + } + + public void updateAction(ActionEntity action) { + if (action.isNew()) { + throw new SpcueRuntimeException("unable to update action that is not already commited"); } - public ActionEntity getAction(ActionInterface action) { - return getJdbcTemplate().queryForObject( - GET_ACTION + " AND pk_action=?", - ACTION_DETAIL_MAPPER, action.getActionId()); - } - - public List getActions(FilterInterface filter) { - return getJdbcTemplate().query( - GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC", - ACTION_DETAIL_MAPPER, filter.getFilterId()); - } + // first we clear out all values - public void updateAction(ActionEntity action) { - if (action.isNew()) { - throw new SpcueRuntimeException("unable to update action that is not already commited"); - } + getJdbcTemplate().update( + "UPDATE action SET str_value=NULL,int_value=NULL,b_value=NULL,float_value=NULL WHERE pk_action=?", + action.getActionId()); - // first we clear out all values + StringBuilder query = new StringBuilder(1024); + query.append("UPDATE action SET str_action=?,str_value_type=?"); - getJdbcTemplate().update( - "UPDATE action SET str_value=NULL,int_value=NULL,b_value=NULL,float_value=NULL WHERE pk_action=?", - action.getActionId()); + List args = new ArrayList(4); + args.add(action.type.toString()); + args.add(action.valueType.toString()); - StringBuilder query = new StringBuilder(1024); - query.append("UPDATE action SET str_action=?,str_value_type=?"); + switch (action.valueType) { + case GROUP_TYPE: + query.append(",pk_folder=? WHERE pk_action=?"); + args.add(action.groupValue); + break; - List args = new ArrayList(4); - args.add(action.type.toString()); - args.add(action.valueType.toString()); + case STRING_TYPE: + query.append(",str_value=? WHERE pk_action=?"); + args.add(action.stringValue); + break; - switch(action.valueType) { - case GROUP_TYPE: - query.append(",pk_folder=? WHERE pk_action=?"); - args.add(action.groupValue); - break; + case INTEGER_TYPE: + query.append(",int_value=? WHERE pk_action=?"); + args.add(action.intValue); + break; - case STRING_TYPE: - query.append(",str_value=? WHERE pk_action=?"); - args.add(action.stringValue); - break; + case FLOAT_TYPE: + query.append(",float_value=? WHERE pk_action=?"); + args.add(action.floatValue); + break; - case INTEGER_TYPE: - query.append(",int_value=? WHERE pk_action=?"); - args.add(action.intValue); - break; + case BOOLEAN_TYPE: + query.append(",b_value=? WHERE pk_action=?"); + args.add(action.booleanValue); + break; - case FLOAT_TYPE: - query.append(",float_value=? WHERE pk_action=?"); - args.add(action.floatValue); - break; - - case BOOLEAN_TYPE: - query.append(",b_value=? 
WHERE pk_action=?"); - args.add(action.booleanValue); - break; - - case NONE_TYPE: - query.append(" WHERE pk_action=?"); - break; - - default: - throw new SpcueRuntimeException("invalid action value type: " + action.valueType); - } - - args.add(action.id); - getJdbcTemplate().update(query.toString(), - args.toArray()); + case NONE_TYPE: + query.append(" WHERE pk_action=?"); + break; + default: + throw new SpcueRuntimeException("invalid action value type: " + action.valueType); } - public void deleteAction(ActionInterface action) { - getJdbcTemplate().update("DELETE FROM action WHERE pk_action=?",action.getActionId()); + args.add(action.id); + getJdbcTemplate().update(query.toString(), args.toArray()); + + } + + public void deleteAction(ActionInterface action) { + getJdbcTemplate().update("DELETE FROM action WHERE pk_action=?", action.getActionId()); + } + + public static final RowMapper ACTION_DETAIL_MAPPER = new RowMapper() { + public ActionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ActionEntity action = new ActionEntity(); + action.id = rs.getString("pk_action"); + action.showId = rs.getString("pk_show"); + action.filterId = rs.getString("pk_filter"); + action.booleanValue = rs.getBoolean("b_value"); + action.groupValue = rs.getString("pk_folder"); + action.intValue = rs.getLong("int_value"); + action.floatValue = rs.getFloat("float_value"); + action.type = ActionType.valueOf(rs.getString("str_action")); + action.valueType = ActionValueType.valueOf(rs.getString("str_value_type")); + action.stringValue = rs.getString("str_value"); + return action; } - - public static final RowMapper ACTION_DETAIL_MAPPER = new RowMapper() { - public ActionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ActionEntity action = new ActionEntity(); - action.id = rs.getString("pk_action"); - action.showId = rs.getString("pk_show"); - action.filterId = rs.getString("pk_filter"); - action.booleanValue = rs.getBoolean("b_value"); - action.groupValue = rs.getString("pk_folder"); - action.intValue = rs.getLong("int_value"); - action.floatValue = rs.getFloat("float_value"); - action.type = ActionType.valueOf(rs.getString("str_action")); - action.valueType = ActionValueType.valueOf(rs.getString("str_value_type")); - action.stringValue = rs.getString("str_value"); - return action; - } - }; + }; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java index 1ceda9b5c..e1fd731bb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.CallableStatement; @@ -41,178 +37,145 @@ import com.imageworks.spcue.dao.AllocationDao; import com.imageworks.spcue.util.SqlUtil; - -public class AllocationDaoJdbc extends JdbcDaoSupport implements AllocationDao { - - public static RowMapper ALLOC_MAPPER = new RowMapper() { - public AllocationEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - AllocationEntity alloc = new AllocationEntity(); - alloc.id = rs.getString("pk_alloc"); - alloc.facilityId = rs.getString("pk_facility"); - alloc.name = rs.getString("str_name"); - alloc.tag = rs.getString("str_tag"); - return alloc; - } - }; - - private static final String GET_ALLOCATION = - "SELECT " + - "alloc.pk_facility,"+ - "alloc.pk_alloc, " + - "alloc.str_name, "+ - "alloc.str_tag, " + - "facility.str_name AS facility_name " + - "FROM " + - "alloc, " + - "facility " + - "WHERE " + - "alloc.pk_facility = facility.pk_facility "; - - public AllocationEntity getAllocationEntity(String id) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND pk_alloc=?", - ALLOC_MAPPER, id); - } - - public AllocationEntity findAllocationEntity(String facility, String name) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.str_name=?", - ALLOC_MAPPER, String.format("%s.%s", facility, name)); - } - - @Override - public AllocationEntity findAllocationEntity(String name) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.str_name=?", - ALLOC_MAPPER, name); - } - - private static final String INSERT_ALLOCATION = - "INSERT INTO " + - "alloc " + - "(" + - "pk_alloc,"+ - "pk_facility,"+ - "str_name, "+ - "str_tag "+ - ") VALUES (?,?,?,?)"; - - public void insertAllocation(FacilityInterface facility, AllocationEntity detail) { - - String new_alloc_name = String.format("%s.%s", - facility.getName(), detail.getName()); - /* - * Checks if the allocation already exits. 
- */ - if (getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM alloc WHERE str_name=?", - Integer.class, new_alloc_name) > 0) { - - getJdbcTemplate().update( - "UPDATE alloc SET b_enabled = true WHERE str_name=?", - new_alloc_name); - } - else { - detail.id = SqlUtil.genKeyRandom(); - detail.name = new_alloc_name; - getJdbcTemplate().update(INSERT_ALLOCATION, - detail.id, facility.getFacilityId(), - detail.name, detail.tag); - } - } - - public void deleteAllocation(AllocationInterface a) { - if (getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE pk_alloc=?", Integer.class, - a.getAllocationId()) > 0) { - throw new EntityRemovalError("allocation still contains hosts", a); - } - - if (getJdbcTemplate().queryForObject( - "SELECT b_default FROM alloc WHERE pk_alloc=?", Boolean.class, - a.getAllocationId())) { - throw new EntityRemovalError("you cannot delete the default allocation", a); - } - - Savepoint sp1; - try { - sp1 = getConnection().setSavepoint(); - } catch (SQLException e) { - throw new RuntimeException("failed to create savepoint", e); - } - - /* - * Allocations are logged in historical data so once they are used you - * can't specifically delete them. They are disabled instead. - */ - try { - getJdbcTemplate().update("DELETE FROM alloc WHERE pk_alloc=?", - a.getAllocationId()); - } catch (DataIntegrityViolationException e) { - try { - getConnection().rollback(sp1); - } catch (SQLException e1) { - throw new RuntimeException("failed to roll back failed delete", e); - } - getJdbcTemplate().update("UPDATE alloc SET b_enabled = false WHERE pk_alloc = ?", - a.getAllocationId()); - } - } - - public void updateAllocationName(AllocationInterface a, String name) { - if (!Pattern.matches("^\\w+$", name)) { - throw new IllegalArgumentException("The new allocation name" + - "must be alpha numeric and not contain the facility prefix."); - } - - String[] parts = a.getName().split("\\.", 2); - String new_name = String.format("%s.%s", parts[0], name); - - getJdbcTemplate().update( - "UPDATE alloc SET str_name=? WHERE pk_alloc=?", - new_name, a.getAllocationId()); - } - - public void updateAllocationTag(AllocationInterface a, String tag) { - getJdbcTemplate().update("UPDATE alloc SET str_tag=? WHERE pk_alloc=?", - tag, a.getAllocationId()); - - getJdbcTemplate().update("UPDATE host_tag SET str_tag=? WHERE " + - "host_tag.str_tag_type='Alloc' AND pk_host IN " + - "(SELECT pk_host FROM host WHERE host.pk_alloc=?)", tag, - a.getAllocationId()); - - for (Map e: getJdbcTemplate().queryForList( - "SELECT pk_host FROM host WHERE pk_alloc=?",a.getAllocationId())) { - final String pk_host = (String) e.get("pk_host"); - getJdbcTemplate().call(new CallableStatementCreator() { - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recalculate_tags(?) 
}"); - c.setString(1, pk_host); - return c; - } - }, new ArrayList()); - } - } - - public void setDefaultAllocation(AllocationInterface a) { - getJdbcTemplate().update("UPDATE alloc SET b_default = false WHERE b_default = true"); - getJdbcTemplate().update("UPDATE alloc SET b_default = true WHERE pk_alloc=?", - a.getAllocationId()); - } - - public AllocationEntity getDefaultAllocationEntity() { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.b_default = true LIMIT 1", - ALLOC_MAPPER); - } - - @Override - public void updateAllocationBillable(AllocationInterface alloc, boolean value) { - getJdbcTemplate().update( - "UPDATE alloc SET b_billable = ? WHERE pk_alloc = ?", - value, alloc.getAllocationId()); - - } +public class AllocationDaoJdbc extends JdbcDaoSupport implements AllocationDao { + + public static RowMapper ALLOC_MAPPER = new RowMapper() { + public AllocationEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + AllocationEntity alloc = new AllocationEntity(); + alloc.id = rs.getString("pk_alloc"); + alloc.facilityId = rs.getString("pk_facility"); + alloc.name = rs.getString("str_name"); + alloc.tag = rs.getString("str_tag"); + return alloc; + } + }; + + private static final String GET_ALLOCATION = "SELECT " + "alloc.pk_facility," + "alloc.pk_alloc, " + + "alloc.str_name, " + "alloc.str_tag, " + "facility.str_name AS facility_name " + "FROM " + + "alloc, " + "facility " + "WHERE " + "alloc.pk_facility = facility.pk_facility "; + + public AllocationEntity getAllocationEntity(String id) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND pk_alloc=?", ALLOC_MAPPER, id); + } + + public AllocationEntity findAllocationEntity(String facility, String name) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", ALLOC_MAPPER, + String.format("%s.%s", facility, name)); + } + + @Override + public AllocationEntity findAllocationEntity(String name) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", ALLOC_MAPPER, + name); + } + + private static final String INSERT_ALLOCATION = "INSERT INTO " + "alloc " + "(" + "pk_alloc," + + "pk_facility," + "str_name, " + "str_tag " + ") VALUES (?,?,?,?)"; + + public void insertAllocation(FacilityInterface facility, AllocationEntity detail) { + + String new_alloc_name = String.format("%s.%s", facility.getName(), detail.getName()); + /* + * Checks if the allocation already exits. 
+ */ + if (getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM alloc WHERE str_name=?", + Integer.class, new_alloc_name) > 0) { + + getJdbcTemplate().update("UPDATE alloc SET b_enabled = true WHERE str_name=?", + new_alloc_name); + } else { + detail.id = SqlUtil.genKeyRandom(); + detail.name = new_alloc_name; + getJdbcTemplate().update(INSERT_ALLOCATION, detail.id, facility.getFacilityId(), detail.name, + detail.tag); + } + } + + public void deleteAllocation(AllocationInterface a) { + if (getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM host WHERE pk_alloc=?", + Integer.class, a.getAllocationId()) > 0) { + throw new EntityRemovalError("allocation still contains hosts", a); + } + + if (getJdbcTemplate().queryForObject("SELECT b_default FROM alloc WHERE pk_alloc=?", + Boolean.class, a.getAllocationId())) { + throw new EntityRemovalError("you cannot delete the default allocation", a); + } + + Savepoint sp1; + try { + sp1 = getConnection().setSavepoint(); + } catch (SQLException e) { + throw new RuntimeException("failed to create savepoint", e); + } + + /* + * Allocations are logged in historical data so once they are used you can't specifically delete + * them. They are disabled instead. + */ + try { + getJdbcTemplate().update("DELETE FROM alloc WHERE pk_alloc=?", a.getAllocationId()); + } catch (DataIntegrityViolationException e) { + try { + getConnection().rollback(sp1); + } catch (SQLException e1) { + throw new RuntimeException("failed to roll back failed delete", e); + } + getJdbcTemplate().update("UPDATE alloc SET b_enabled = false WHERE pk_alloc = ?", + a.getAllocationId()); + } + } + + public void updateAllocationName(AllocationInterface a, String name) { + if (!Pattern.matches("^\\w+$", name)) { + throw new IllegalArgumentException( + "The new allocation name" + "must be alpha numeric and not contain the facility prefix."); + } + + String[] parts = a.getName().split("\\.", 2); + String new_name = String.format("%s.%s", parts[0], name); + + getJdbcTemplate().update("UPDATE alloc SET str_name=? WHERE pk_alloc=?", new_name, + a.getAllocationId()); + } + + public void updateAllocationTag(AllocationInterface a, String tag) { + getJdbcTemplate().update("UPDATE alloc SET str_tag=? WHERE pk_alloc=?", tag, + a.getAllocationId()); + + getJdbcTemplate().update( + "UPDATE host_tag SET str_tag=? WHERE " + "host_tag.str_tag_type='Alloc' AND pk_host IN " + + "(SELECT pk_host FROM host WHERE host.pk_alloc=?)", + tag, a.getAllocationId()); + + for (Map e : getJdbcTemplate() + .queryForList("SELECT pk_host FROM host WHERE pk_alloc=?", a.getAllocationId())) { + final String pk_host = (String) e.get("pk_host"); + getJdbcTemplate().call(new CallableStatementCreator() { + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call recalculate_tags(?) }"); + c.setString(1, pk_host); + return c; + } + }, new ArrayList()); + } + } + + public void setDefaultAllocation(AllocationInterface a) { + getJdbcTemplate().update("UPDATE alloc SET b_default = false WHERE b_default = true"); + getJdbcTemplate().update("UPDATE alloc SET b_default = true WHERE pk_alloc=?", + a.getAllocationId()); + } + + public AllocationEntity getDefaultAllocationEntity() { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.b_default = true LIMIT 1", + ALLOC_MAPPER); + } + + @Override + public void updateAllocationBillable(AllocationInterface alloc, boolean value) { + getJdbcTemplate().update("UPDATE alloc SET b_billable = ? 
WHERE pk_alloc = ?", value, + alloc.getAllocationId()); + + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java index 08e1634aa..f2b85cba5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -38,402 +34,284 @@ import com.imageworks.spcue.grpc.renderpartition.RenderPartitionType; import com.imageworks.spcue.util.SqlUtil; -public class BookingDaoJdbc extends - JdbcDaoSupport implements BookingDao { - - private static final String INSERT_LOCAL_JOB_ASSIGNMENT = - "INSERT INTO " + - "host_local " + - "(" + - "pk_host_local,"+ - "pk_job,"+ - "pk_layer,"+ - "pk_frame,"+ - "str_type,"+ - "pk_host,"+ - "int_mem_max,"+ - "int_mem_idle,"+ - "int_cores_max,"+ - "int_cores_idle,"+ - "int_gpu_mem_idle,"+ - "int_gpu_mem_max,"+ - "int_gpus_max,"+ - "int_gpus_idle,"+ - "int_threads "+ - ") " + - "VALUES " + - "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), job.getName()); - l.setHostId(h.getHostId()); - l.setJobId(job.getJobId()); - l.setType(RenderPartitionType.JOB_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpuUnits(l.getMaxGpuUnits()); - l.setIdleGpuMemory(l.getMaxGpuMemory()); - - getJdbcTemplate().update( - INSERT_LOCAL_JOB_ASSIGNMENT, - l.id, - job.getJobId(), - l.getLayerId(), - l.getFrameId(), - l.getType().toString(), - h.getHostId(), - l.getMaxMemory(), - l.getMaxMemory(), - l.getMaxCoreUnits(), - l.getMaxCoreUnits(), - l.getMaxGpuMemory(), - l.getMaxGpuMemory(), - l.getMaxGpuUnits(), - l.getMaxGpuUnits(), - l.getThreads()); - } - - @Override - public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), layer.getName()); - l.setHostId(h.getHostId()); - l.setJobId(layer.getJobId()); - l.setLayerId(layer.getLayerId()); - l.setType(RenderPartitionType.LAYER_PARTITION); - 
l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpuUnits(l.getMaxGpuUnits()); - l.setIdleGpuMemory(l.getMaxGpuMemory()); - - getJdbcTemplate().update( - INSERT_LOCAL_JOB_ASSIGNMENT, - l.id, - l.getJobId(), - l.getLayerId(), - l.getFrameId(), - l.getType().toString(), - h.getHostId(), - l.getMaxMemory(), - l.getMaxMemory(), - l.getMaxCoreUnits(), - l.getMaxCoreUnits(), - l.getMaxGpuMemory(), - l.getMaxGpuMemory(), - l.getMaxGpuUnits(), - l.getMaxGpuUnits(), - l.getThreads()); - } - - @Override - public void insertLocalHostAssignment(HostInterface h, FrameInterface frame, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), frame.getName()); - l.setHostId(h.getHostId()); - l.setJobId(frame.getJobId()); - l.setLayerId(frame.getLayerId()); - l.setFrameId(frame.getFrameId()); - l.setType(RenderPartitionType.FRAME_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpuUnits(l.getMaxGpuUnits()); - l.setIdleGpuMemory(l.getMaxGpuMemory()); - - getJdbcTemplate().update( - INSERT_LOCAL_JOB_ASSIGNMENT, - l.id, - l.getJobId(), - l.getLayerId(), - l.getFrameId(), - l.getType().toString(), - h.getHostId(), - l.getMaxMemory(), - l.getMaxMemory(), - l.getMaxCoreUnits(), - l.getMaxCoreUnits(), - l.getMaxGpuMemory(), - l.getMaxGpuMemory(), - l.getMaxGpuUnits(), - l.getMaxGpuUnits(), - l.getThreads()); - } - public static final RowMapper LJA_MAPPER = - new RowMapper() { +public class BookingDaoJdbc extends JdbcDaoSupport implements BookingDao { + + private static final String INSERT_LOCAL_JOB_ASSIGNMENT = "INSERT INTO " + "host_local " + "(" + + "pk_host_local," + "pk_job," + "pk_layer," + "pk_frame," + "str_type," + "pk_host," + + "int_mem_max," + "int_mem_idle," + "int_cores_max," + "int_cores_idle," + + "int_gpu_mem_idle," + "int_gpu_mem_max," + "int_gpus_max," + "int_gpus_idle," + + "int_threads " + ") " + "VALUES " + "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHostAssignment l) { + l.id = SqlUtil.genKeyRandom(); + l.name = String.format("%s->%s", h.getName(), job.getName()); + l.setHostId(h.getHostId()); + l.setJobId(job.getJobId()); + l.setType(RenderPartitionType.JOB_PARTITION); + l.setIdleCoreUnits(l.getMaxCoreUnits()); + l.setIdleMemory(l.getMaxMemory()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); + + getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, job.getJobId(), l.getLayerId(), + l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), l.getMaxMemory(), + l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), l.getMaxGpuMemory(), + l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); + } + + @Override + public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, + LocalHostAssignment l) { + l.id = SqlUtil.genKeyRandom(); + l.name = String.format("%s->%s", h.getName(), layer.getName()); + l.setHostId(h.getHostId()); + l.setJobId(layer.getJobId()); + l.setLayerId(layer.getLayerId()); + l.setType(RenderPartitionType.LAYER_PARTITION); + l.setIdleCoreUnits(l.getMaxCoreUnits()); + l.setIdleMemory(l.getMaxMemory()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); + + getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, l.getJobId(), l.getLayerId(), + l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), l.getMaxMemory(), + 
l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), l.getMaxGpuMemory(), + l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); + } + + @Override + public void insertLocalHostAssignment(HostInterface h, FrameInterface frame, + LocalHostAssignment l) { + l.id = SqlUtil.genKeyRandom(); + l.name = String.format("%s->%s", h.getName(), frame.getName()); + l.setHostId(h.getHostId()); + l.setJobId(frame.getJobId()); + l.setLayerId(frame.getLayerId()); + l.setFrameId(frame.getFrameId()); + l.setType(RenderPartitionType.FRAME_PARTITION); + l.setIdleCoreUnits(l.getMaxCoreUnits()); + l.setIdleMemory(l.getMaxMemory()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); + + getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, l.getJobId(), l.getLayerId(), + l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), l.getMaxMemory(), + l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), l.getMaxGpuMemory(), + l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); + } + + public static final RowMapper LJA_MAPPER = + new RowMapper() { public LocalHostAssignment mapRow(final ResultSet rs, int rowNum) throws SQLException { - LocalHostAssignment l = new LocalHostAssignment(); - l.id = rs.getString("pk_host_local"); - l.setMaxCoreUnits(rs.getInt("int_cores_max")); - l.setMaxMemory(rs.getLong("int_mem_max")); - l.setMaxGpuUnits(rs.getInt("int_gpus_max")); - l.setMaxGpuMemory(rs.getLong("int_gpu_mem_max")); - l.setThreads(rs.getInt("int_threads")); - l.setIdleCoreUnits(rs.getInt("int_cores_idle")); - l.setIdleMemory(rs.getLong("int_mem_idle")); - l.setIdleGpuUnits(rs.getInt("int_gpus_idle")); - l.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); - l.setJobId(rs.getString("pk_job")); - l.setLayerId(rs.getString("pk_layer")); - l.setFrameId(rs.getString("pk_frame")); - l.setHostId(rs.getString("pk_host")); - l.setType(RenderPartitionType.valueOf(rs.getString("str_type"))); - return l; + LocalHostAssignment l = new LocalHostAssignment(); + l.id = rs.getString("pk_host_local"); + l.setMaxCoreUnits(rs.getInt("int_cores_max")); + l.setMaxMemory(rs.getLong("int_mem_max")); + l.setMaxGpuUnits(rs.getInt("int_gpus_max")); + l.setMaxGpuMemory(rs.getLong("int_gpu_mem_max")); + l.setThreads(rs.getInt("int_threads")); + l.setIdleCoreUnits(rs.getInt("int_cores_idle")); + l.setIdleMemory(rs.getLong("int_mem_idle")); + l.setIdleGpuUnits(rs.getInt("int_gpus_idle")); + l.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); + l.setJobId(rs.getString("pk_job")); + l.setLayerId(rs.getString("pk_layer")); + l.setFrameId(rs.getString("pk_frame")); + l.setHostId(rs.getString("pk_host")); + l.setType(RenderPartitionType.valueOf(rs.getString("str_type"))); + return l; } - }; - - private static final String QUERY_FOR_LJA = - "SELECT " + - "pk_host_local,"+ - "pk_job,"+ - "pk_layer," + - "pk_frame,"+ - "pk_host,"+ - "int_mem_idle,"+ - "int_mem_max,"+ - "int_cores_idle,"+ - "int_cores_max,"+ - "int_gpu_mem_idle,"+ - "int_gpu_mem_max,"+ - "int_gpus_idle,"+ - "int_gpus_max,"+ - "int_threads, "+ - "str_type " + - "FROM " + - "host_local "; - - @Override - public List getLocalJobAssignment(HostInterface host) { - return getJdbcTemplate().query( - QUERY_FOR_LJA + - "WHERE " + - "host_local.pk_host = ? 
", - LJA_MAPPER, host.getHostId()); - } - - @Override - public LocalHostAssignment getLocalJobAssignment(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_LJA + " WHERE pk_host_local = ?", - LJA_MAPPER, id); - } - - @Override - public LocalHostAssignment getLocalJobAssignment(String hostId, String jobId) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_LJA + " WHERE pk_host = ? and pk_job = ?", - LJA_MAPPER, hostId, jobId); + }; + + private static final String QUERY_FOR_LJA = "SELECT " + "pk_host_local," + "pk_job," + "pk_layer," + + "pk_frame," + "pk_host," + "int_mem_idle," + "int_mem_max," + "int_cores_idle," + + "int_cores_max," + "int_gpu_mem_idle," + "int_gpu_mem_max," + "int_gpus_idle," + + "int_gpus_max," + "int_threads, " + "str_type " + "FROM " + "host_local "; + + @Override + public List getLocalJobAssignment(HostInterface host) { + return getJdbcTemplate().query(QUERY_FOR_LJA + "WHERE " + "host_local.pk_host = ? ", LJA_MAPPER, + host.getHostId()); + } + + @Override + public LocalHostAssignment getLocalJobAssignment(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_LJA + " WHERE pk_host_local = ?", LJA_MAPPER, + id); + } + + @Override + public LocalHostAssignment getLocalJobAssignment(String hostId, String jobId) { + return getJdbcTemplate().queryForObject(QUERY_FOR_LJA + " WHERE pk_host = ? and pk_job = ?", + LJA_MAPPER, hostId, jobId); + } + + @Override + public boolean deleteLocalJobAssignment(LocalHostAssignment l) { + return getJdbcTemplate().update("DELETE FROM host_local WHERE pk_host_local = ?", + l.getId()) > 0; + } + + private static final String HAS_LOCAL_JOB = + "SELECT " + "COUNT(1) " + "FROM " + "host_local " + "WHERE " + "host_local.pk_host = ? "; + + @Override + public boolean hasLocalJob(HostInterface host) { + return getJdbcTemplate().queryForObject(HAS_LOCAL_JOB, Integer.class, host.getHostId()) > 0; + } + + private static final String HAS_ACTIVE_LOCAL_JOB = "SELECT " + "COUNT(1) " + "FROM " + + "host_local, " + "proc " + "WHERE " + "host_local.pk_host = proc.pk_host " + "AND " + + "proc.b_local = true " + "AND " + "host_local.pk_host = ? "; + + @Override + public boolean hasActiveLocalJob(HostInterface host) { + return getJdbcTemplate().queryForObject(HAS_ACTIVE_LOCAL_JOB, Integer.class, + host.getHostId()) > 0; + } + + @Override + public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { + return getJdbcTemplate().queryForObject( + "SELECT ? - int_cores_max FROM host_local WHERE pk_host_local=?", Integer.class, coreUnits, + l.getId()); + } + + @Override + public int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits) { + return getJdbcTemplate().queryForObject( + "SELECT ? - int_gpus_max FROM host_local WHERE pk_host_local=?", Integer.class, gpuUnits, + l.getId()); + } + + private static final String UPDATE_MAX_CORES = + "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle + (? - int_cores_max), " + + "int_cores_max = ? " + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxCores(LocalHostAssignment l, int coreUnits) { + return getJdbcTemplate().update(UPDATE_MAX_CORES, coreUnits, coreUnits, l.getId()) > 0; + } + + private static final String UPDATE_MAX_GPUS = + "UPDATE " + "host_local " + "SET " + "int_gpus_idle = int_gpus_idle + (? - int_gpus_max), " + + "int_gpus_max = ? " + "WHERE " + "pk_host_local = ? 
"; + + @Override + public boolean updateMaxGpus(LocalHostAssignment l, int gpuUnits) { + return getJdbcTemplate().update(UPDATE_MAX_GPUS, gpuUnits, gpuUnits, l.getId()) > 0; + } + + private static final String UPDATE_MAX_MEMORY = + "UPDATE " + "host_local " + "SET " + "int_mem_idle = int_mem_idle + (? - int_mem_max), " + + "int_mem_max = ? " + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxMemory(LocalHostAssignment l, long maxMemory) { + return getJdbcTemplate().update(UPDATE_MAX_MEMORY, maxMemory, maxMemory, l.getId()) > 0; + } + + private static final String UPDATE_MAX_GPU_MEMORY = "UPDATE " + "host_local " + "SET " + + "int_gpu_mem_idle = int_gpu_mem_idle + (? - int_gpu_mem_max), " + "int_gpu_mem_max = ? " + + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory) { + return getJdbcTemplate().update(UPDATE_MAX_GPU_MEMORY, maxGpuMemory, maxGpuMemory, + l.getId()) > 0; + } + + @Override + public boolean deactivate(LocalHostAssignment l) { + return getJdbcTemplate().update( + "UPDATE host_local SET b_active = false WHERE " + "pk_host_local = ? AND b_active = true", + l.getId()) > 0; + } + + /** + * + * @param h HostInterface + * @param cores int + * @return boolean + */ + @Override + public boolean allocateCoresFromHost(HostInterface h, int cores) { + + try { + return getJdbcTemplate().update( + "UPDATE host SET int_cores_idle = int_cores_idle - ? " + "WHERE pk_host = ?", cores, + h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to allocate " + cores + " from host, " + e); } - @Override - public boolean deleteLocalJobAssignment(LocalHostAssignment l) { - return getJdbcTemplate().update( - "DELETE FROM host_local WHERE pk_host_local = ?", - l.getId()) > 0; + } + + /** + * + * @param h HostInterface + * @param gpus int + * @return boolean + */ + @Override + public boolean allocateGpusFromHost(HostInterface h, int gpus) { + + try { + return getJdbcTemplate().update( + "UPDATE host SET int_gpus_idle = int_gpus_idle - ? " + "WHERE pk_host = ?", gpus, + h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to allocate " + gpus + " GPU from host, " + e); } - - private static final String HAS_LOCAL_JOB = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host_local " + - "WHERE " + - "host_local.pk_host = ? "; - - @Override - public boolean hasLocalJob(HostInterface host) { - return getJdbcTemplate().queryForObject(HAS_LOCAL_JOB, - Integer.class, host.getHostId()) > 0; - } - - private static final String HAS_ACTIVE_LOCAL_JOB = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host_local, " + - "proc " + - "WHERE " + - "host_local.pk_host = proc.pk_host " + - "AND " + - "proc.b_local = true " + - "AND " + - "host_local.pk_host = ? "; - - @Override - public boolean hasActiveLocalJob(HostInterface host) { - return getJdbcTemplate().queryForObject(HAS_ACTIVE_LOCAL_JOB, - Integer.class, host.getHostId()) > 0; + } + + /** + * + * @param h HostInterface + * @param cores int + * @return boolean + */ + @Override + public boolean deallocateCoresFromHost(HostInterface h, int cores) { + try { + return getJdbcTemplate().update( + "UPDATE host SET int_cores_idle = int_cores_idle + ? 
WHERE pk_host = ?", cores, + h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to de-allocate " + cores + " from host, " + e); } - - @Override - public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { - return getJdbcTemplate().queryForObject( - "SELECT ? - int_cores_max FROM host_local WHERE pk_host_local=?", - Integer.class, coreUnits, l.getId()); - } - - @Override - public int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits) { - return getJdbcTemplate().queryForObject( - "SELECT ? - int_gpus_max FROM host_local WHERE pk_host_local=?", - Integer.class, gpuUnits, l.getId()); - } - - private static final String UPDATE_MAX_CORES = - "UPDATE " + - "host_local " + - "SET " + - "int_cores_idle = int_cores_idle + (? - int_cores_max), " + - "int_cores_max = ? "+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxCores(LocalHostAssignment l, int coreUnits) { - return getJdbcTemplate().update(UPDATE_MAX_CORES, - coreUnits, coreUnits, l.getId()) > 0; - } - - private static final String UPDATE_MAX_GPUS = - "UPDATE " + - "host_local " + - "SET " + - "int_gpus_idle = int_gpus_idle + (? - int_gpus_max), " + - "int_gpus_max = ? "+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxGpus(LocalHostAssignment l, int gpuUnits) { - return getJdbcTemplate().update(UPDATE_MAX_GPUS, - gpuUnits, gpuUnits, l.getId()) > 0; - } - - private static final String UPDATE_MAX_MEMORY = - "UPDATE " + - "host_local " + - "SET " + - "int_mem_idle = int_mem_idle + (? - int_mem_max), " + - "int_mem_max = ? "+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxMemory(LocalHostAssignment l, long maxMemory) { - return getJdbcTemplate().update( - UPDATE_MAX_MEMORY, maxMemory, maxMemory, l.getId()) > 0; - } - - private static final String UPDATE_MAX_GPU_MEMORY = - "UPDATE " + - "host_local " + - "SET " + - "int_gpu_mem_idle = int_gpu_mem_idle + (? - int_gpu_mem_max), " + - "int_gpu_mem_max = ? "+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory) { - return getJdbcTemplate().update( - UPDATE_MAX_GPU_MEMORY, maxGpuMemory, maxGpuMemory, l.getId()) > 0; - } - - @Override - public boolean deactivate(LocalHostAssignment l) { - return getJdbcTemplate().update( - "UPDATE host_local SET b_active = false WHERE " + - "pk_host_local = ? AND b_active = true", - l.getId()) > 0; - } - - /** - * - * @param h HostInterface - * @param cores int - * @return boolean - */ - @Override - public boolean allocateCoresFromHost(HostInterface h, int cores) { - - try { - return getJdbcTemplate().update( - "UPDATE host SET int_cores_idle = int_cores_idle - ? " + - "WHERE pk_host = ?", - cores, h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException("Failed to allocate " + - cores + " from host, " + e); - } - - } - - /** - * - * @param h HostInterface - * @param gpus int - * @return boolean - */ - @Override - public boolean allocateGpusFromHost(HostInterface h, int gpus) { - - try { - return getJdbcTemplate().update( - "UPDATE host SET int_gpus_idle = int_gpus_idle - ? 
" + - "WHERE pk_host = ?", - gpus, h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException("Failed to allocate " + - gpus + " GPU from host, " + e); - } - } - - /** - * - * @param h HostInterface - * @param cores int - * @return boolean - */ - @Override - public boolean deallocateCoresFromHost(HostInterface h, int cores) { - try { - return getJdbcTemplate().update( - "UPDATE host SET int_cores_idle = int_cores_idle + ? WHERE pk_host = ?", - cores, h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException("Failed to de-allocate " + - cores + " from host, " + e); - } - } - - /** - * - * @param h HostInterface - * @param gpus int - * @return boolean - */ - @Override - public boolean deallocateGpusFromHost(HostInterface h, int gpus) { - try { - return getJdbcTemplate().update( - "UPDATE host SET int_gpus_idle = int_gpus_idle + ? WHERE pk_host = ?", - gpus, h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException("Failed to de-allocate " + - gpus + " GPU from host, " + e); - } - } - - @Override - public boolean hasResourceDeficit(HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host_local WHERE " + - "(int_cores_max < int_cores_max - int_cores_idle OR " + - "int_gpus_max < int_gpus_max - int_gpus_idle OR " + - "int_gpu_mem_max < int_gpu_mem_max - int_gpu_mem_idle OR " + - "int_mem_max < int_mem_max - int_mem_idle) AND " + - "host_local.pk_host= ?", - Integer.class, host.getHostId()) > 0; + } + + /** + * + * @param h HostInterface + * @param gpus int + * @return boolean + */ + @Override + public boolean deallocateGpusFromHost(HostInterface h, int gpus) { + try { + return getJdbcTemplate().update( + "UPDATE host SET int_gpus_idle = int_gpus_idle + ? WHERE pk_host = ?", gpus, + h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to de-allocate " + gpus + " GPU from host, " + e); } + } + + @Override + public boolean hasResourceDeficit(HostInterface host) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host_local WHERE " + + "(int_cores_max < int_cores_max - int_cores_idle OR " + + "int_gpus_max < int_gpus_max - int_gpus_idle OR " + + "int_gpu_mem_max < int_gpu_mem_max - int_gpu_mem_idle OR " + + "int_mem_max < int_mem_max - int_mem_idle) AND " + "host_local.pk_host= ?", + Integer.class, host.getHostId()) > 0; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java index ea61f07bb..9362981c1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -35,113 +31,94 @@ public class CommentDaoJdbc extends JdbcDaoSupport implements CommentDao { - public void deleteComment(String id) { - /* - * Checks what type of comment we have. - */ - Map type = getJdbcTemplate().queryForMap( - "SELECT pk_job, pk_host FROM comments WHERE pk_comment=?",id); - - /* - * If the comment is deleted successfully, check if we need to unset - * the b_comment boolean flag. - */ - if (getJdbcTemplate().update( - "DELETE FROM comments WHERE pk_comment=?",id) > 0) { - if (type.get("pk_job") != null) { - getJdbcTemplate().update("UPDATE job SET b_comment=false WHERE job.pk_job = ? AND " + - "(SELECT COUNT(1) FROM comments c WHERE c.pk_job = job.pk_job) = 0",type.get("pk_job")); - } - else if (type.get("pk_host") != null) { - getJdbcTemplate().update("UPDATE host SET b_comment=false WHERE host.pk_host = ? AND " + - "(SELECT COUNT(1) FROM comments c WHERE c.pk_host = host.pk_host) = 0",type.get("pk_host")); - } - } - } - - private static final RowMapper COMMENT_DETAIL_MAPPER = - new RowMapper() { - public CommentDetail mapRow(ResultSet rs, int row) throws SQLException { - CommentDetail d = new CommentDetail(); - d.id = rs.getString("pk_comment"); - d.message = rs.getString("str_message"); - d.subject = rs.getString("str_subject"); - d.timestamp = rs.getTimestamp("ts_created"); - d.user = rs.getString("str_user"); - return d; - } - }; - - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject) { - return getJdbcTemplate().update( - "DELETE FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", - host.getHostId(), user, subject) > 0; - } - - public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject) { - return getJdbcTemplate().query( - "SELECT * FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", - COMMENT_DETAIL_MAPPER, host.getHostId(), user, subject); - } - - public CommentDetail getCommentDetail(String id) { - return getJdbcTemplate().queryForObject( - "SELECT * FROM comments WHERE pk_comment=?", - COMMENT_DETAIL_MAPPER, id); - } - - public void updateComment(CommentDetail comment) { + public void deleteComment(String id) { + /* + * Checks what type of comment we have. + */ + Map type = getJdbcTemplate() + .queryForMap("SELECT pk_job, pk_host FROM comments WHERE pk_comment=?", id); + + /* + * If the comment is deleted successfully, check if we need to unset the b_comment boolean flag. + */ + if (getJdbcTemplate().update("DELETE FROM comments WHERE pk_comment=?", id) > 0) { + if (type.get("pk_job") != null) { getJdbcTemplate().update( - "UPDATE comments SET str_message=?,str_subject=? 
WHERE pk_comment=?", - comment.message, comment.subject, comment.id); - } - - public void updateCommentMessage(String id, String message) { - getJdbcTemplate().update( - "UPDATE comments SET str_message=? WHERE pk_comment=?", - message,id); - } - - public void updateCommentSubject(String id, String subject) { - getJdbcTemplate().update( - "UPDATE comments SET str_subject=? WHERE pk_comment=?", - subject,id); - } - - private static final String INSERT_JOB_COMMENT = - "INSERT INTO " + - "comments " + - "(" + - "pk_comment,pk_job,str_user,str_subject,str_message"+ - ") VALUES (?,?,?,?,?)"; - - public void insertComment(JobInterface job, CommentDetail comment) { - comment.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_COMMENT, - comment.id, job.getJobId(), comment.user, - comment.subject, comment.message); + "UPDATE job SET b_comment=false WHERE job.pk_job = ? AND " + + "(SELECT COUNT(1) FROM comments c WHERE c.pk_job = job.pk_job) = 0", + type.get("pk_job")); + } else if (type.get("pk_host") != null) { getJdbcTemplate().update( - "UPDATE job SET b_comment=true WHERE pk_job=?", - job.getJobId()); + "UPDATE host SET b_comment=false WHERE host.pk_host = ? AND " + + "(SELECT COUNT(1) FROM comments c WHERE c.pk_host = host.pk_host) = 0", + type.get("pk_host")); + } } + } - private static final String INSERT_HOST_COMMENT = - "INSERT INTO " + - "comments " + - "(" + - "pk_comment,pk_host,str_user,str_subject,str_message"+ - ") VALUES (?,?,?,?,?)"; - - - public void insertComment(HostInterface host, CommentDetail comment) { - comment.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_HOST_COMMENT, - comment.id, host.getHostId(), comment.user, - comment.subject, comment.message); - getJdbcTemplate().update( - "UPDATE host SET b_comment=true WHERE pk_host=?", - host.getHostId()); - } + private static final RowMapper COMMENT_DETAIL_MAPPER = + new RowMapper() { + public CommentDetail mapRow(ResultSet rs, int row) throws SQLException { + CommentDetail d = new CommentDetail(); + d.id = rs.getString("pk_comment"); + d.message = rs.getString("str_message"); + d.subject = rs.getString("str_subject"); + d.timestamp = rs.getTimestamp("ts_created"); + d.user = rs.getString("str_user"); + return d; + } + }; + + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, + String subject) { + return getJdbcTemplate().update( + "DELETE FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", host.getHostId(), + user, subject) > 0; + } + + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject) { + return getJdbcTemplate().query( + "SELECT * FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", + COMMENT_DETAIL_MAPPER, host.getHostId(), user, subject); + } + + public CommentDetail getCommentDetail(String id) { + return getJdbcTemplate().queryForObject("SELECT * FROM comments WHERE pk_comment=?", + COMMENT_DETAIL_MAPPER, id); + } + + public void updateComment(CommentDetail comment) { + getJdbcTemplate().update("UPDATE comments SET str_message=?,str_subject=? WHERE pk_comment=?", + comment.message, comment.subject, comment.id); + } + + public void updateCommentMessage(String id, String message) { + getJdbcTemplate().update("UPDATE comments SET str_message=? WHERE pk_comment=?", message, id); + } + + public void updateCommentSubject(String id, String subject) { + getJdbcTemplate().update("UPDATE comments SET str_subject=? 
WHERE pk_comment=?", subject, id); + } + + private static final String INSERT_JOB_COMMENT = "INSERT INTO " + "comments " + "(" + + "pk_comment,pk_job,str_user,str_subject,str_message" + ") VALUES (?,?,?,?,?)"; + + public void insertComment(JobInterface job, CommentDetail comment) { + comment.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_JOB_COMMENT, comment.id, job.getJobId(), comment.user, + comment.subject, comment.message); + getJdbcTemplate().update("UPDATE job SET b_comment=true WHERE pk_job=?", job.getJobId()); + } + + private static final String INSERT_HOST_COMMENT = "INSERT INTO " + "comments " + "(" + + "pk_comment,pk_host,str_user,str_subject,str_message" + ") VALUES (?,?,?,?,?)"; + + public void insertComment(HostInterface host, CommentDetail comment) { + comment.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_HOST_COMMENT, comment.id, host.getHostId(), comment.user, + comment.subject, comment.message); + getJdbcTemplate().update("UPDATE host SET b_comment=true WHERE pk_host=?", host.getHostId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java index 6d461423f..bdffd1694 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -34,86 +30,58 @@ public class DeedDaoJdbc extends JdbcDaoSupport implements DeedDao { - public static final RowMapper - DEED_MAPPER = new RowMapper() { - public DeedEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - DeedEntity o = new DeedEntity(); - o.id = rs.getString("pk_deed"); - o.owner = rs.getString("str_username"); - o.host = rs.getString("str_hostname"); - return o; - } - }; - - @Override - public boolean deleteDeed(DeedEntity deed) { - return getJdbcTemplate().update( - "DELETE FROM deed WHERE pk_deed = ?", - deed.getId()) > 0; - } - - @Override - public boolean deleteDeed(HostInterface host) { - return getJdbcTemplate().update( - "DELETE FROM deed WHERE pk_host = ?", - host.getHostId()) > 0; - } - - @Override - public void deleteDeeds(OwnerEntity owner) { - getJdbcTemplate().update( - "DELETE FROM deed WHERE pk_owner = ?", - owner.getId()); - } - - private static final String INSERT_DEED = - "INSERT INTO " + - "deed " + - "("+ - "pk_deed,"+ - "pk_owner,"+ - "pk_host " + - ") "+ - "VALUES (?,?,?)"; - - public DeedEntity insertDeed(OwnerEntity owner, HostInterface host) { - DeedEntity deed = new DeedEntity(); - deed.id = SqlUtil.genKeyRandom(); - deed.host = host.getName(); - deed.owner = owner.name; - - getJdbcTemplate().update(INSERT_DEED, - deed.getId(), owner.getId(), host.getId()); - - return deed; - } - - private static final String QUERY_FOR_DEED = - "SELECT " + - "deed.pk_deed, "+ - "host.str_name as str_hostname, " + - "owner.str_username " + - "FROM " + - "deed,"+ - "host,"+ - "owner " + - "WHERE " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "deed.pk_host = host.pk_host "; - - @Override - public DeedEntity getDeed(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_DEED + " AND pk_deed = ?", - DEED_MAPPER, id); - } - - @Override - public List getDeeds(OwnerEntity owner) { - return getJdbcTemplate().query( - QUERY_FOR_DEED + " AND owner.pk_owner = ?", - DEED_MAPPER, owner.getId()); + public static final RowMapper DEED_MAPPER = new RowMapper() { + public DeedEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + DeedEntity o = new DeedEntity(); + o.id = rs.getString("pk_deed"); + o.owner = rs.getString("str_username"); + o.host = rs.getString("str_hostname"); + return o; } + }; + + @Override + public boolean deleteDeed(DeedEntity deed) { + return getJdbcTemplate().update("DELETE FROM deed WHERE pk_deed = ?", deed.getId()) > 0; + } + + @Override + public boolean deleteDeed(HostInterface host) { + return getJdbcTemplate().update("DELETE FROM deed WHERE pk_host = ?", host.getHostId()) > 0; + } + + @Override + public void deleteDeeds(OwnerEntity owner) { + getJdbcTemplate().update("DELETE FROM deed WHERE pk_owner = ?", owner.getId()); + } + + private static final String INSERT_DEED = "INSERT INTO " + "deed " + "(" + "pk_deed," + + "pk_owner," + "pk_host " + ") " + "VALUES (?,?,?)"; + + public DeedEntity insertDeed(OwnerEntity owner, HostInterface host) { + DeedEntity deed = new DeedEntity(); + deed.id = SqlUtil.genKeyRandom(); + deed.host = host.getName(); + deed.owner = owner.name; + + getJdbcTemplate().update(INSERT_DEED, deed.getId(), owner.getId(), host.getId()); + + return deed; + } + + private static final String QUERY_FOR_DEED = + "SELECT " + "deed.pk_deed, " + "host.str_name as str_hostname, " + "owner.str_username " + + "FROM " + "deed," + "host," + "owner " + "WHERE " + "deed.pk_owner = owner.pk_owner " + + "AND " + "deed.pk_host = 
host.pk_host "; + + @Override + public DeedEntity getDeed(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_DEED + " AND pk_deed = ?", DEED_MAPPER, id); + } + + @Override + public List getDeeds(OwnerEntity owner) { + return getJdbcTemplate().query(QUERY_FOR_DEED + " AND owner.pk_owner = ?", DEED_MAPPER, + owner.getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java index 04c4234e5..24a1c3ba0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -32,53 +28,48 @@ public class DepartmentDaoJdbc extends JdbcDaoSupport implements DepartmentDao { - public static final RowMapper DEPARTMENT_MAPPER = new RowMapper() { + public static final RowMapper DEPARTMENT_MAPPER = + new RowMapper() { public DepartmentInterface mapRow(ResultSet rs, int rowNum) throws SQLException { - DepartmentEntity d = new DepartmentEntity(); - d.id = rs.getString("pk_dept"); - d.name = rs.getString("str_name"); - return d; + DepartmentEntity d = new DepartmentEntity(); + d.id = rs.getString("pk_dept"); + d.name = rs.getString("str_name"); + return d; } - }; - - @Override - public boolean departmentExists(String name) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM dept WHERE str_name=?", - Integer.class, name) > 0; - } - - @Override - public DepartmentInterface findDepartment(String name) { - return getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE str_name=?", - DEPARTMENT_MAPPER, name); - } - - @Override - public DepartmentInterface getDefaultDepartment() { - return getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE b_default=true", - DEPARTMENT_MAPPER); - } - - @Override - public DepartmentInterface getDepartment(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE pk_dept=?", - DEPARTMENT_MAPPER, id); - } - - @Override - public void deleteDepartment(DepartmentInterface d) { - getJdbcTemplate().update("DELETE FROM dept WHERE pk_dept=?", - d.getDepartmentId()); - } - - @Override - public void insertDepartment(String name) { - getJdbcTemplate().update("INSERT INTO dept (pk_dept,str_name) VALUES (?,?)", - SqlUtil.genKeyRandom(), name); - } + }; + + @Override + public boolean departmentExists(String name) { + return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM dept WHERE str_name=?", + Integer.class, name) > 0; + } + + @Override + public DepartmentInterface findDepartment(String name) { + return getJdbcTemplate().queryForObject("SELECT pk_dept, str_name FROM dept WHERE str_name=?", + DEPARTMENT_MAPPER, name); + } + + @Override + public DepartmentInterface getDefaultDepartment() { + return getJdbcTemplate().queryForObject( + "SELECT pk_dept, str_name FROM dept WHERE b_default=true", DEPARTMENT_MAPPER); + } + + @Override + public DepartmentInterface getDepartment(String id) { + return getJdbcTemplate().queryForObject("SELECT pk_dept, str_name FROM dept WHERE pk_dept=?", + DEPARTMENT_MAPPER, id); + } + + @Override + public void deleteDepartment(DepartmentInterface d) { + getJdbcTemplate().update("DELETE FROM dept WHERE pk_dept=?", d.getDepartmentId()); + } + + @Override + public void insertDepartment(String name) { + getJdbcTemplate().update("INSERT INTO dept (pk_dept,str_name) VALUES (?,?)", + SqlUtil.genKeyRandom(), name); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java index bb81b1909..30038ccd8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -49,703 +45,371 @@ public class DependDaoJdbc extends JdbcDaoSupport implements DependDao { - public static final RowMapper DEPEND_MAPPER = new RowMapper() { + public static final RowMapper DEPEND_MAPPER = + new RowMapper() { public LightweightDependency mapRow(ResultSet rs, int row) throws SQLException { - LightweightDependency d = new LightweightDependency(); - d.id = rs.getString("pk_depend"); - d.type = DependType.valueOf(rs.getString("str_type")); - d.target = DependTarget.valueOf(rs.getString("str_target")); - d.anyFrame = rs.getBoolean("b_any"); - d.parent = rs.getString("pk_parent"); - d.active = rs.getBoolean("b_active"); - d.dependErFrameId = rs.getString("pk_frame_depend_er"); - d.dependOnFrameId = rs.getString("pk_frame_depend_on"); - d.dependErLayerId = rs.getString("pk_layer_depend_er"); - d.dependOnLayerId =rs.getString("pk_layer_depend_on"); - d.dependOnJobId = rs.getString("pk_job_depend_on"); - d.dependErJobId = rs.getString("pk_job_depend_er"); - return d; - } - }; - - private static final String INSERT_DEPEND = - "INSERT INTO " + - "depend " + - "(" + - "pk_depend,"+ - "pk_parent,"+ - "pk_job_depend_er," + - "pk_layer_depend_er," + - "pk_frame_depend_er," + - "pk_job_depend_on," + - "pk_layer_depend_on," + - "pk_frame_depend_on," + - "str_type," + - "b_any, " + - "str_target, " + - "b_active, " + - "str_signature, "+ - "b_composite " + - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertDepend(JobOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErJob().getJobId(), - null, - null, - d.getDependOnJob().getJobId(), - null, - null, - DependType.JOB_ON_JOB.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(JobOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErJob().getJobId(), - null, - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.JOB_ON_LAYER.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(JobOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErJob().getJobId(), - null, - null, - d.getDependOnFrame().getJobId(), - 
d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), - DependType.JOB_ON_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(LayerOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnJob().getJobId(), - null, - null, - DependType.LAYER_ON_JOB.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(LayerOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.LAYER_ON_LAYER.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(LayerOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnFrame().getJobId(), - d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), - DependType.LAYER_ON_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), - d.getDependErFrame().getFrameId(), - d.getDependOnJob().getJobId(), - null, - null, - DependType.FRAME_ON_JOB.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), - d.getDependErFrame().getFrameId(), - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.FRAME_ON_LAYER.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(PreviousFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.PREVIOUS_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - String parentId = null; - if (d.getParent() != null) { - parentId =d.getParent().getId(); - } - - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - parentId, - d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), - d.getDependErFrame().getFrameId(), - d.getDependOnFrame().getJobId(), - d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), - DependType.FRAME_ON_FRAME.toString(), - d.isAnyFrame(), - 
d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameByFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.FRAME_BY_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - private static final String UPDATE_FRAME_STATE = - "UPDATE " + - "frame " + - "SET " + - "str_state='DEPEND' " + - "WHERE " + - "int_depend_count != 0 " + - "AND " + - "frame.str_state NOT IN ('SUCCEEDED','EATEN','RUNNING','DEPEND') " + - "AND " + - "frame.pk_frame = ?"; - - @Override - public void updateFrameState(FrameInterface f) { - getJdbcTemplate().update(UPDATE_FRAME_STATE, - f.getFrameId()); - } - - private static final String UPDATE_DEPEND_COUNT = - "UPDATE " + - "frame " + - "SET " + - "int_depend_count = int_depend_count + 1 " + - "WHERE " + - "pk_frame = ?"; - - @Override - public void incrementDependCount(FrameInterface f) { - int result = getJdbcTemplate().update(UPDATE_DEPEND_COUNT, - f.getFrameId()); - if (result == 0) { - throw new DependException("updating the depend count for " + - " the frame " + f.getName() + " in job " + f.getJobId() + - "failed."); - } - } - - private static final String DECREMENT_DEPEND_COUNT = - "UPDATE " + - "frame " + - "SET " + - "int_depend_count = int_depend_count -1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_depend_count > 0"; - - @Override - public boolean decrementDependCount(FrameInterface f) { - return getJdbcTemplate().update(DECREMENT_DEPEND_COUNT, - f.getFrameId()) == 1; - } - - private static final String[] DELETE_DEPEND = { - "DELETE FROM depend WHERE pk_parent=?", - "DELETE FROM depend WHERE pk_depend=?" - }; - - @Override - public void deleteDepend(LightweightDependency depend) { - if (depend.type.equals(DependType.FRAME_BY_FRAME)) { - getJdbcTemplate().update(DELETE_DEPEND[0], depend.getId()); - } - getJdbcTemplate().update(DELETE_DEPEND[1], depend.getId()); - } - - private static final String GET_LIGHTWEIGHT_DEPEND = - "SELECT * FROM depend WHERE pk_depend=?"; - - @Override - public LightweightDependency getDepend(String id) { - return getJdbcTemplate().queryForObject( - GET_LIGHTWEIGHT_DEPEND, - DEPEND_MAPPER, id); - } - - private static final String GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE = - "SELECT * FROM depend WHERE str_signature=?"; - - @Override - public LightweightDependency getDependBySignature(String s) { - return getJdbcTemplate().queryForObject( - GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE, - DEPEND_MAPPER, s); - } - - private static final String GET_WHAT_DEPENDS_ON_JOB = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "pk_job_depend_on=? 
" + - "AND " + - "b_active = true " + - "AND " + - "str_type IN (?,?,?)"; - - @Override - public List getWhatDependsOn(JobInterface job) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB, - DEPEND_MAPPER, job.getJobId(), - DependType.JOB_ON_JOB.toString(), - DependType.LAYER_ON_JOB.toString(), - DependType.FRAME_ON_JOB.toString()); - } - - private static final String GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "pk_job_depend_on=? " + - "AND " + - "b_active = true " + - "AND " + - "str_target = ? " + - "AND " + - "str_type IN (?,?,?)"; - - @Override - public List getWhatDependsOn(JobInterface job, DependTarget target) { - if (target.equals(DependTarget.ANY_TARGET)) { - return getWhatDependsOn(job); + LightweightDependency d = new LightweightDependency(); + d.id = rs.getString("pk_depend"); + d.type = DependType.valueOf(rs.getString("str_type")); + d.target = DependTarget.valueOf(rs.getString("str_target")); + d.anyFrame = rs.getBoolean("b_any"); + d.parent = rs.getString("pk_parent"); + d.active = rs.getBoolean("b_active"); + d.dependErFrameId = rs.getString("pk_frame_depend_er"); + d.dependOnFrameId = rs.getString("pk_frame_depend_on"); + d.dependErLayerId = rs.getString("pk_layer_depend_er"); + d.dependOnLayerId = rs.getString("pk_layer_depend_on"); + d.dependOnJobId = rs.getString("pk_job_depend_on"); + d.dependErJobId = rs.getString("pk_job_depend_er"); + return d; } - else { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET, - DEPEND_MAPPER, job.getJobId(), target.toString(), - DependType.JOB_ON_JOB.toString(), - DependType.LAYER_ON_JOB.toString(), - DependType.FRAME_ON_JOB.toString()); - } - } - - private static final String GET_WHAT_DEPENDS_ON_LAYER = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "pk_job_depend_on=? " + - "AND " + - "pk_layer_depend_on=? " + - "AND " + - "str_type IN (?,?,?) 
" + - "AND " + - "b_active = ?"; - - @Override - public List getWhatDependsOn(LayerInterface layer) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, - DEPEND_MAPPER, layer.getJobId(), layer.getLayerId(), - DependType.JOB_ON_LAYER.toString(), - DependType.LAYER_ON_LAYER.toString(), - DependType.FRAME_ON_LAYER.toString(), - true); + }; + + private static final String INSERT_DEPEND = "INSERT INTO " + "depend " + "(" + "pk_depend," + + "pk_parent," + "pk_job_depend_er," + "pk_layer_depend_er," + "pk_frame_depend_er," + + "pk_job_depend_on," + "pk_layer_depend_on," + "pk_frame_depend_on," + "str_type," + + "b_any, " + "str_target, " + "b_active, " + "str_signature, " + "b_composite " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertDepend(JobOnJob d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), null, + null, d.getDependOnJob().getJobId(), null, null, DependType.JOB_ON_JOB.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(JobOnLayer d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), null, + null, d.getDependOnLayer().getJobId(), d.getDependOnLayer().getLayerId(), null, + DependType.JOB_ON_LAYER.toString(), d.isAnyFrame(), d.getTarget().toString(), d.isActive(), + d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(JobOnFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), null, + null, d.getDependOnFrame().getJobId(), d.getDependOnFrame().getLayerId(), + d.getDependOnFrame().getFrameId(), DependType.JOB_ON_FRAME.toString(), d.isAnyFrame(), + d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(LayerOnJob d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnJob().getJobId(), null, null, + DependType.LAYER_ON_JOB.toString(), d.isAnyFrame(), d.getTarget().toString(), d.isActive(), + d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(LayerOnLayer d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), + d.getDependOnLayer().getLayerId(), null, DependType.LAYER_ON_LAYER.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(LayerOnFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnFrame().getJobId(), + d.getDependOnFrame().getLayerId(), d.getDependOnFrame().getFrameId(), + DependType.LAYER_ON_FRAME.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(FrameOnJob d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErFrame().getJobId(), + d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), + 
d.getDependOnJob().getJobId(), null, null, DependType.FRAME_ON_JOB.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(FrameOnLayer d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErFrame().getJobId(), + d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), + d.getDependOnLayer().getJobId(), d.getDependOnLayer().getLayerId(), null, + DependType.FRAME_ON_LAYER.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(PreviousFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), + d.getDependOnLayer().getLayerId(), null, DependType.PREVIOUS_FRAME.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(FrameOnFrame d) { + d.setId(SqlUtil.genKeyRandom()); + String parentId = null; + if (d.getParent() != null) { + parentId = d.getParent().getId(); } - @Override - public List getWhatDependsOn(LayerInterface layer, boolean active) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, - DEPEND_MAPPER, layer.getJobId(), layer.getLayerId(), - DependType.JOB_ON_LAYER.toString(), - DependType.LAYER_ON_LAYER.toString(), - DependType.FRAME_ON_LAYER.toString(), - active); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), parentId, d.getDependErFrame().getJobId(), + d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), + d.getDependOnFrame().getJobId(), d.getDependOnFrame().getLayerId(), + d.getDependOnFrame().getFrameId(), DependType.FRAME_ON_FRAME.toString(), d.isAnyFrame(), + d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(FrameByFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), + d.getDependOnLayer().getLayerId(), null, DependType.FRAME_BY_FRAME.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); + } + + private static final String UPDATE_FRAME_STATE = + "UPDATE " + "frame " + "SET " + "str_state='DEPEND' " + "WHERE " + "int_depend_count != 0 " + + "AND " + "frame.str_state NOT IN ('SUCCEEDED','EATEN','RUNNING','DEPEND') " + "AND " + + "frame.pk_frame = ?"; + + @Override + public void updateFrameState(FrameInterface f) { + getJdbcTemplate().update(UPDATE_FRAME_STATE, f.getFrameId()); + } + + private static final String UPDATE_DEPEND_COUNT = "UPDATE " + "frame " + "SET " + + "int_depend_count = int_depend_count + 1 " + "WHERE " + "pk_frame = ?"; + + @Override + public void incrementDependCount(FrameInterface f) { + int result = getJdbcTemplate().update(UPDATE_DEPEND_COUNT, f.getFrameId()); + if (result == 0) { + throw new DependException("updating the depend count for " + " the frame " + f.getName() + + " in job " + f.getJobId() + "failed."); } + } + private static final String DECREMENT_DEPEND_COUNT = + "UPDATE " + "frame " + "SET " + "int_depend_count = int_depend_count -1 " + "WHERE " + + "pk_frame = ? 
" + "AND " + "int_depend_count > 0"; - private static final String GET_WHAT_DEPENDS_ON_FRAME = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "b_active = ? " + - "AND " + - "pk_job_depend_on = ? " + - "AND " + - "(pk_frame_depend_on = ? " + - "AND " + - "str_type IN (?,?,?)) " + - "OR " + - "(pk_layer_depend_on = ? AND str_type = ? AND b_any = true)"; - - @Override - public List getWhatDependsOn(FrameInterface frame) { - return getWhatDependsOn(frame, true); - } + @Override + public boolean decrementDependCount(FrameInterface f) { + return getJdbcTemplate().update(DECREMENT_DEPEND_COUNT, f.getFrameId()) == 1; + } - @Override - public List getWhatDependsOn(FrameInterface frame, boolean active) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_FRAME, - DEPEND_MAPPER, active, frame.getJobId(), frame.getFrameId(), - DependType.FRAME_ON_FRAME.toString(), - DependType.LAYER_ON_FRAME.toString(), - DependType.JOB_ON_FRAME.toString(), - frame.getLayerId(), - DependType.LAYER_ON_LAYER.toString()); - } + private static final String[] DELETE_DEPEND = + {"DELETE FROM depend WHERE pk_parent=?", "DELETE FROM depend WHERE pk_depend=?"}; - private static final String SET_INACTIVE = - "UPDATE " + - "depend " + - "SET " + - "b_active=false,"+ - "ts_satisfied=current_timestamp,"+ - "str_signature=pk_depend "+ - "WHERE " + - "pk_depend = ? " + - "AND " + - "b_active = true " + - "AND " + - "b_composite = false"; - - @Override - public boolean setInactive(LightweightDependency depend) { - depend.active = getJdbcTemplate().update(SET_INACTIVE, depend.getId()) == 1; - return depend.active; + @Override + public void deleteDepend(LightweightDependency depend) { + if (depend.type.equals(DependType.FRAME_BY_FRAME)) { + getJdbcTemplate().update(DELETE_DEPEND[0], depend.getId()); } - - private static final String SET_ACTIVE = - "UPDATE " + - "depend " + - "SET " + - "b_active=true "+ - "WHERE " + - "pk_depend=? 
" + - "AND "+ - "b_active=false"; - - @Override - public boolean setActive(LightweightDependency depend) { - if (!depend.type.equals(DependType.FRAME_ON_FRAME) - && !depend.type.equals(DependType.LAYER_ON_LAYER)) { - return false; - } - depend.active = getJdbcTemplate().update( - SET_ACTIVE, depend.getId()) == 1; - return depend.active; + getJdbcTemplate().update(DELETE_DEPEND[1], depend.getId()); + } + + private static final String GET_LIGHTWEIGHT_DEPEND = "SELECT * FROM depend WHERE pk_depend=?"; + + @Override + public LightweightDependency getDepend(String id) { + return getJdbcTemplate().queryForObject(GET_LIGHTWEIGHT_DEPEND, DEPEND_MAPPER, id); + } + + private static final String GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE = + "SELECT * FROM depend WHERE str_signature=?"; + + @Override + public LightweightDependency getDependBySignature(String s) { + return getJdbcTemplate().queryForObject(GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE, DEPEND_MAPPER, s); + } + + private static final String GET_WHAT_DEPENDS_ON_JOB = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? " + "AND " + + "b_active = true " + "AND " + "str_type IN (?,?,?)"; + + @Override + public List getWhatDependsOn(JobInterface job) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB, DEPEND_MAPPER, job.getJobId(), + DependType.JOB_ON_JOB.toString(), DependType.LAYER_ON_JOB.toString(), + DependType.FRAME_ON_JOB.toString()); + } + + private static final String GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? " + "AND " + + "b_active = true " + "AND " + "str_target = ? " + "AND " + "str_type IN (?,?,?)"; + + @Override + public List getWhatDependsOn(JobInterface job, DependTarget target) { + if (target.equals(DependTarget.ANY_TARGET)) { + return getWhatDependsOn(job); + } else { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET, DEPEND_MAPPER, + job.getJobId(), target.toString(), DependType.JOB_ON_JOB.toString(), + DependType.LAYER_ON_JOB.toString(), DependType.FRAME_ON_JOB.toString()); } - - private static final String GET_CHILD_DEPENDS = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_job_depend_er = ? " + - "AND " + - "depend.pk_job_depend_on = ? " + - "AND " + - "depend.pk_parent = ? 
" + - "AND " + - "depend.b_active = true "; - - @Override - public List getChildDepends(LightweightDependency depend) { - return getJdbcTemplate().query(GET_CHILD_DEPENDS, DEPEND_MAPPER, - depend.dependErJobId, depend.dependOnJobId, depend.id); + } + + private static final String GET_WHAT_DEPENDS_ON_LAYER = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? " + "AND " + + "pk_layer_depend_on=? " + "AND " + "str_type IN (?,?,?) " + "AND " + "b_active = ?"; + + @Override + public List getWhatDependsOn(LayerInterface layer) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, DEPEND_MAPPER, layer.getJobId(), + layer.getLayerId(), DependType.JOB_ON_LAYER.toString(), + DependType.LAYER_ON_LAYER.toString(), DependType.FRAME_ON_LAYER.toString(), true); + } + + @Override + public List getWhatDependsOn(LayerInterface layer, boolean active) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, DEPEND_MAPPER, layer.getJobId(), + layer.getLayerId(), DependType.JOB_ON_LAYER.toString(), + DependType.LAYER_ON_LAYER.toString(), DependType.FRAME_ON_LAYER.toString(), active); + } + + private static final String GET_WHAT_DEPENDS_ON_FRAME = + "SELECT " + "depend.pk_depend," + "depend.str_type," + "depend.str_target," + "depend.b_any," + + "depend.pk_parent," + "depend.b_active," + "depend.pk_frame_depend_er," + + "depend.pk_frame_depend_on," + "depend.pk_layer_depend_er," + + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + + "FROM " + "depend " + "WHERE " + "b_active = ? " + "AND " + "pk_job_depend_on = ? " + + "AND " + "(pk_frame_depend_on = ? " + "AND " + "str_type IN (?,?,?)) " + "OR " + + "(pk_layer_depend_on = ? AND str_type = ? AND b_any = true)"; + + @Override + public List getWhatDependsOn(FrameInterface frame) { + return getWhatDependsOn(frame, true); + } + + @Override + public List getWhatDependsOn(FrameInterface frame, boolean active) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_FRAME, DEPEND_MAPPER, active, + frame.getJobId(), frame.getFrameId(), DependType.FRAME_ON_FRAME.toString(), + DependType.LAYER_ON_FRAME.toString(), DependType.JOB_ON_FRAME.toString(), + frame.getLayerId(), DependType.LAYER_ON_LAYER.toString()); + } + + private static final String SET_INACTIVE = "UPDATE " + "depend " + "SET " + "b_active=false," + + "ts_satisfied=current_timestamp," + "str_signature=pk_depend " + "WHERE " + "pk_depend = ? " + + "AND " + "b_active = true " + "AND " + "b_composite = false"; + + @Override + public boolean setInactive(LightweightDependency depend) { + depend.active = getJdbcTemplate().update(SET_INACTIVE, depend.getId()) == 1; + return depend.active; + } + + private static final String SET_ACTIVE = "UPDATE " + "depend " + "SET " + "b_active=true " + + "WHERE " + "pk_depend=? 
" + "AND " + "b_active=false"; + + @Override + public boolean setActive(LightweightDependency depend) { + if (!depend.type.equals(DependType.FRAME_ON_FRAME) + && !depend.type.equals(DependType.LAYER_ON_LAYER)) { + return false; } - - private static final String GET_WHAT_THIS_JOB_DEPENDS_ON = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_job_depend_er = ? " + - "AND " + - "depend.b_active = true " + - "AND " + - "depend.pk_parent IS NULL "; - - @Override - public List getWhatThisDependsOn(JobInterface job, DependTarget target) { - String query = GET_WHAT_THIS_JOB_DEPENDS_ON; - Object[] values = new Object[] { job.getJobId() }; - if (!target.equals(DependTarget.ANY_TARGET)) { - query = query + " AND depend.str_target = ?"; - values = new Object[] { job.getJobId(), target.toString() }; - } - return getJdbcTemplate().query(query,DEPEND_MAPPER, values); - + depend.active = getJdbcTemplate().update(SET_ACTIVE, depend.getId()) == 1; + return depend.active; + } + + private static final String GET_CHILD_DEPENDS = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "depend.pk_job_depend_er = ? " + + "AND " + "depend.pk_job_depend_on = ? " + "AND " + "depend.pk_parent = ? " + "AND " + + "depend.b_active = true "; + + @Override + public List getChildDepends(LightweightDependency depend) { + return getJdbcTemplate().query(GET_CHILD_DEPENDS, DEPEND_MAPPER, depend.dependErJobId, + depend.dependOnJobId, depend.id); + } + + private static final String GET_WHAT_THIS_JOB_DEPENDS_ON = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "depend.pk_job_depend_er = ? " + + "AND " + "depend.b_active = true " + "AND " + "depend.pk_parent IS NULL "; + + @Override + public List getWhatThisDependsOn(JobInterface job, DependTarget target) { + String query = GET_WHAT_THIS_JOB_DEPENDS_ON; + Object[] values = new Object[] {job.getJobId()}; + if (!target.equals(DependTarget.ANY_TARGET)) { + query = query + " AND depend.str_target = ?"; + values = new Object[] {job.getJobId(), target.toString()}; } - - private static final String GET_WHAT_THIS_LAYER_DEPENDS_ON = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_layer_depend_er = ? 
" + - "AND " + - "depend.b_active = true " + - "AND " + - "depend.pk_parent IS NULL " + - "AND " + - "depend.str_type IN (?,?,?,?) "; - - @Override - public List getWhatThisDependsOn(LayerInterface layer, DependTarget target) { - if (!target.equals(DependTarget.ANY_TARGET)) { - String query = GET_WHAT_THIS_LAYER_DEPENDS_ON + " AND str_target = ?"; - return getJdbcTemplate().query(query, DEPEND_MAPPER, - layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), - DependType.FRAME_BY_FRAME.toString(), target.toString()); - } - else { - return getJdbcTemplate().query(GET_WHAT_THIS_LAYER_DEPENDS_ON, DEPEND_MAPPER, - layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), - DependType.FRAME_BY_FRAME.toString()); - } + return getJdbcTemplate().query(query, DEPEND_MAPPER, values); + + } + + private static final String GET_WHAT_THIS_LAYER_DEPENDS_ON = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + + "depend.pk_layer_depend_er = ? " + "AND " + "depend.b_active = true " + "AND " + + "depend.pk_parent IS NULL " + "AND " + "depend.str_type IN (?,?,?,?) "; + + @Override + public List getWhatThisDependsOn(LayerInterface layer, + DependTarget target) { + if (!target.equals(DependTarget.ANY_TARGET)) { + String query = GET_WHAT_THIS_LAYER_DEPENDS_ON + " AND str_target = ?"; + return getJdbcTemplate().query(query, DEPEND_MAPPER, layer.getLayerId(), + DependType.LAYER_ON_JOB.toString(), DependType.LAYER_ON_LAYER.toString(), + DependType.LAYER_ON_FRAME.toString(), DependType.FRAME_BY_FRAME.toString(), + target.toString()); + } else { + return getJdbcTemplate().query(GET_WHAT_THIS_LAYER_DEPENDS_ON, DEPEND_MAPPER, + layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), + DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), + DependType.FRAME_BY_FRAME.toString()); } - - private static final String GET_WHAT_THIS_FRAME_DEPENDS_ON = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_frame_depend_er = ? " + - "AND " + - "depend.b_active = true " + - "AND " + - "depend.str_type IN (?,?,?) 
"; - - @Override - public List getWhatThisDependsOn(FrameInterface frame, DependTarget target) { - if (!target.equals(DependTarget.ANY_TARGET)) { - String query = GET_WHAT_THIS_FRAME_DEPENDS_ON + " AND depend.str_target = ?"; - return getJdbcTemplate().query(query, DEPEND_MAPPER, - frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), - DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString(), - target.toString()); - } - else { - return getJdbcTemplate().query(GET_WHAT_THIS_FRAME_DEPENDS_ON, DEPEND_MAPPER, - frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), - DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString()); - } + } + + private static final String GET_WHAT_THIS_FRAME_DEPENDS_ON = + "SELECT " + "depend.pk_depend," + "depend.str_type," + "depend.str_target," + "depend.b_any," + + "depend.pk_parent," + "depend.b_active," + "depend.pk_frame_depend_er," + + "depend.pk_frame_depend_on," + "depend.pk_layer_depend_er," + + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + + "FROM " + "depend " + "WHERE " + "depend.pk_frame_depend_er = ? " + "AND " + + "depend.b_active = true " + "AND " + "depend.str_type IN (?,?,?) "; + + @Override + public List getWhatThisDependsOn(FrameInterface frame, + DependTarget target) { + if (!target.equals(DependTarget.ANY_TARGET)) { + String query = GET_WHAT_THIS_FRAME_DEPENDS_ON + " AND depend.str_target = ?"; + return getJdbcTemplate().query(query, DEPEND_MAPPER, frame.getFrameId(), + DependType.FRAME_ON_JOB.toString(), DependType.FRAME_ON_LAYER.toString(), + DependType.FRAME_ON_FRAME.toString(), target.toString()); + } else { + return getJdbcTemplate().query(GET_WHAT_THIS_FRAME_DEPENDS_ON, DEPEND_MAPPER, + frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), + DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString()); } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java index 010294323..803a0556f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java @@ -2,23 +2,22 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.dao.postgres; +// spotless:off public class DispatchQuery { public static final String FIND_JOBS_BY_SHOW = "/* FIND_JOBS_BY_SHOW */ " + @@ -1253,3 +1252,5 @@ private static final String replaceQueryForFifo(String query) { } + +// spotless:on diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java index c2af24e0f..6962fb3e8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.dao.postgres; import java.sql.Connection; @@ -54,577 +51,518 @@ import com.imageworks.spcue.dao.DispatcherDao; import com.imageworks.spcue.grpc.host.ThreadMode; - /** * Dispatcher DAO * * @category DAO */ public class DispatcherDaoJdbc extends JdbcDaoSupport implements DispatcherDao { - private static final Logger logger = LogManager.getLogger(DispatcherDaoJdbc.class); - private PrometheusMetricsCollector prometheusMetrics; - - public void setPrometheusMetrics(PrometheusMetricsCollector prometheusMetrics) { - this.prometheusMetrics = prometheusMetrics; - } + private static final Logger logger = LogManager.getLogger(DispatcherDaoJdbc.class); + private PrometheusMetricsCollector prometheusMetrics; - public static final RowMapper PKJOB_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("pk_job"); - } - }; + public void setPrometheusMetrics(PrometheusMetricsCollector prometheusMetrics) { + this.prometheusMetrics = prometheusMetrics; + } - private static final RowMapper SHOW_MAPPER = new RowMapper() { - public SortableShow mapRow(ResultSet rs, int rowNum) throws SQLException { - return new SortableShow( - rs.getString("pk_show"), - rs.getFloat("float_tier")); - } - }; - - private int threadMode(int mode) { - if (mode == ThreadMode.ALL_VALUE) - return mode; - return ThreadMode.AUTO_VALUE; + public static final RowMapper PKJOB_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("pk_job"); } + }; - /** - * Number of milliseconds before the show cache expires and - * a new show cache is created. 
- */ - private static final long SHOW_CACHE_EXPIRE_TIME_SEC = 8000; - - /** - * Wraps a list of SortableShows along with an expiration time. - */ - private class ShowCache { - final private long expireTime = System.currentTimeMillis() + SHOW_CACHE_EXPIRE_TIME_SEC; - final private List shows; - - public ShowCache(List shows) { - this.shows = shows; - Collections.sort(this.shows); - } - - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; - } - - public List getShows() { - return shows; - } + private static final RowMapper SHOW_MAPPER = new RowMapper() { + public SortableShow mapRow(ResultSet rs, int rowNum) throws SQLException { + return new SortableShow(rs.getString("pk_show"), rs.getFloat("float_tier")); } - - /** - * A cache of SortableShows keyed on host tags. - */ - private final ConcurrentHashMap bookableShows = - new ConcurrentHashMap(); - - public boolean testMode = false; - - /** - * Choose between different scheduling strategies - */ - private SchedulingMode schedulingMode; - - @Autowired - public DispatcherDaoJdbc(Environment env) { - this.schedulingMode = SchedulingMode.valueOf(env.getProperty( - "dispatcher.scheduling_mode", String.class, "PRIORITY_ONLY")); + }; + + private int threadMode(int mode) { + if (mode == ThreadMode.ALL_VALUE) + return mode; + return ThreadMode.AUTO_VALUE; + } + + /** + * Number of milliseconds before the show cache expires and a new show cache is created. + */ + private static final long SHOW_CACHE_EXPIRE_TIME_SEC = 8000; + + /** + * Wraps a list of SortableShows along with an expiration time. + */ + private class ShowCache { + final private long expireTime = System.currentTimeMillis() + SHOW_CACHE_EXPIRE_TIME_SEC; + final private List shows; + + public ShowCache(List shows) { + this.shows = shows; + Collections.sort(this.shows); } - @Override - public SchedulingMode getSchedulingMode() { - return schedulingMode; + public boolean isExpired() { + return System.currentTimeMillis() > expireTime; } - @Override - public void setSchedulingMode(SchedulingMode schedulingMode) { - this.schedulingMode = schedulingMode; + public List getShows() { + return shows; } - - /** - * Returns a sorted list of shows that have pending jobs - * which could benefit from the specified allocation. - * - * @param alloc - * @return a sorted list of shows. - */ - private List getBookableShows(AllocationInterface alloc) { - long startTime = System.currentTimeMillis(); - String key = alloc.getAllocationId(); - - ShowCache cached = bookableShows.get(key); - if (cached == null) { - bookableShows.put(key, new ShowCache(getJdbcTemplate().query( - FIND_SHOWS, - SHOW_MAPPER, alloc.getAllocationId()))); - } - else if (cached.isExpired()) { - bookableShows.put(key, new ShowCache(getJdbcTemplate().query( - FIND_SHOWS, - SHOW_MAPPER, alloc.getAllocationId()))); - } - prometheusMetrics.setBookingDurationMetric("getBookableShows", - System.currentTimeMillis() - startTime); - - return bookableShows.get(key).shows; + } + + /** + * A cache of SortableShows keyed on host tags. 
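+   * Each entry wraps a sorted list of SortableShows together with an expiration time; entries are
+   * rebuilt lazily by getBookableShows() once ShowCache.isExpired() returns true.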
+ */ + private final ConcurrentHashMap bookableShows = + new ConcurrentHashMap(); + + public boolean testMode = false; + + /** + * Choose between different scheduling strategies + */ + private SchedulingMode schedulingMode; + + @Autowired + public DispatcherDaoJdbc(Environment env) { + this.schedulingMode = SchedulingMode + .valueOf(env.getProperty("dispatcher.scheduling_mode", String.class, "PRIORITY_ONLY")); + } + + @Override + public SchedulingMode getSchedulingMode() { + return schedulingMode; + } + + @Override + public void setSchedulingMode(SchedulingMode schedulingMode) { + this.schedulingMode = schedulingMode; + } + + /** + * Returns a sorted list of shows that have pending jobs which could benefit from the specified + * allocation. + * + * @param alloc + * @return a sorted list of shows. + */ + private List getBookableShows(AllocationInterface alloc) { + long startTime = System.currentTimeMillis(); + String key = alloc.getAllocationId(); + + ShowCache cached = bookableShows.get(key); + if (cached == null) { + bookableShows.put(key, + new ShowCache(getJdbcTemplate().query(FIND_SHOWS, SHOW_MAPPER, alloc.getAllocationId()))); + } else if (cached.isExpired()) { + bookableShows.put(key, + new ShowCache(getJdbcTemplate().query(FIND_SHOWS, SHOW_MAPPER, alloc.getAllocationId()))); } - - private String handleInClause(String key, String query, int inValueLength) { - String placeholders = String.join(",", Collections.nCopies(inValueLength, "?")); - return query.replace(key + " IN ?", key + " IN (" + placeholders + ")"); + prometheusMetrics.setBookingDurationMetric("getBookableShows", + System.currentTimeMillis() - startTime); + + return bookableShows.get(key).shows; + } + + private String handleInClause(String key, String query, int inValueLength) { + String placeholders = String.join(",", Collections.nCopies(inValueLength, "?")); + return query.replace(key + " IN ?", key + " IN (" + placeholders + ")"); + } + + private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { + LinkedHashSet result = new LinkedHashSet(); + List shows = new LinkedList(getBookableShows(host)); + // shows were sorted. If we want it in random sequence, we need to shuffle it. + if (shuffleShows) { + if (!shows.isEmpty()) + shows.remove(0); + Collections.shuffle(shows); } - private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { - LinkedHashSet result = new LinkedHashSet(); - List shows = new LinkedList(getBookableShows(host)); - // shows were sorted. If we want it in random sequence, we need to shuffle it. - if (shuffleShows) { - if (!shows.isEmpty()) - shows.remove(0); - Collections.shuffle(shows); - } - - long loopTime = System.currentTimeMillis(); - for (SortableShow s: shows) { - long lastTime = System.currentTimeMillis(); - if (s.isSkipped(host.tags, (long) host.cores, host.memory)) { - logger.info("skipping show " + s.getShowId()); - continue; - } - - if (s.isSkipped(host)) { - logger.info("skipping show " + s.getShowId() + ", over its subscription."); - continue; - } - - /** - * Check if the show is over its subscription because we're using - * cached SortableShows, we don't pull a fresh list of shows for - * a while. If the show is over its subscription the alloc - * gets add to the SortableShow skipped alloc set. - */ - if (getJdbcTemplate().queryForObject( - "SELECT int_burst - int_cores FROM subscription WHERE pk_show=? 
AND pk_alloc=?", - Integer.class, s.getShowId(), host.getAllocationId()) < 100) { - s.skip(host); - - prometheusMetrics.setBookingDurationMetric("findDispatchJobs check overburst", - System.currentTimeMillis() - lastTime); - continue; - } - - if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { - result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { - @Override - public PreparedStatement createPreparedStatement(Connection conn) - throws SQLException { - String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); - PreparedStatement find_jobs_stmt = conn.prepareStatement(query); - - int index = 1; - find_jobs_stmt.setString(index++, s.getShowId()); - find_jobs_stmt.setString(index++, host.getFacilityId()); - for (String item : host.getOs()) { - find_jobs_stmt.setString(index++, item); - } - find_jobs_stmt.setInt(index++, host.idleCores); - find_jobs_stmt.setLong(index++, host.idleMemory); - find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); - find_jobs_stmt.setString(index++, host.getName()); - find_jobs_stmt.setInt(index++, numJobs * 10); - return find_jobs_stmt; - }}, PKJOB_MAPPER - )); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs nogpu findByShowQuery", - System.currentTimeMillis() - lastTime); - } - else { - result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { - @Override - public PreparedStatement createPreparedStatement(Connection conn) - throws SQLException { - String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); - PreparedStatement find_jobs_stmt = conn.prepareStatement(query); - int index = 1; - find_jobs_stmt.setString(index++, s.getShowId()); - find_jobs_stmt.setString(index++, host.getFacilityId()); - for (String item : host.getOs()) { - find_jobs_stmt.setString(index++, item); - } - find_jobs_stmt.setInt(index++, host.idleCores); - find_jobs_stmt.setLong(index++, host.idleMemory); - find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); - find_jobs_stmt.setInt(index++, host.idleGpus); - find_jobs_stmt.setLong(index++, (host.idleGpuMemory > 0) ? 1 : 0); - find_jobs_stmt.setLong(index++, host.idleGpuMemory); - find_jobs_stmt.setString(index++, host.getName()); - find_jobs_stmt.setInt(index++, numJobs * 10); - return find_jobs_stmt; - }}, PKJOB_MAPPER - )); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs findByShowQuery", - System.currentTimeMillis() - lastTime); - } + long loopTime = System.currentTimeMillis(); + for (SortableShow s : shows) { + long lastTime = System.currentTimeMillis(); + if (s.isSkipped(host.tags, (long) host.cores, host.memory)) { + logger.info("skipping show " + s.getShowId()); + continue; + } + + if (s.isSkipped(host)) { + logger.info("skipping show " + s.getShowId() + ", over its subscription."); + continue; + } + + /** + * Check if the show is over its subscription because we're using cached SortableShows, we + * don't pull a fresh list of shows for a while. If the show is over its subscription the + * alloc gets add to the SortableShow skipped alloc set. + */ + if (getJdbcTemplate().queryForObject( + "SELECT int_burst - int_cores FROM subscription WHERE pk_show=? 
AND pk_alloc=?", + Integer.class, s.getShowId(), host.getAllocationId()) < 100) { + s.skip(host); + + prometheusMetrics.setBookingDurationMetric("findDispatchJobs check overburst", + System.currentTimeMillis() - lastTime); + continue; + } + + if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { + result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) throws SQLException { + String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); + PreparedStatement find_jobs_stmt = conn.prepareStatement(query); - // Collect metrics - prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); - if (result.size() < 1) { - if (host.gpuMemory == 0) { - s.skip(host.tags, host.idleCores, host.idleMemory); - } + int index = 1; + find_jobs_stmt.setString(index++, s.getShowId()); + find_jobs_stmt.setString(index++, host.getFacilityId()); + for (String item : host.getOs()) { + find_jobs_stmt.setString(index++, item); } - else { - return result; + find_jobs_stmt.setInt(index++, host.idleCores); + find_jobs_stmt.setLong(index++, host.idleMemory); + find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); + find_jobs_stmt.setString(index++, host.getName()); + find_jobs_stmt.setInt(index++, numJobs * 10); + return find_jobs_stmt; + } + }, PKJOB_MAPPER)); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs nogpu findByShowQuery", + System.currentTimeMillis() - lastTime); + } else { + result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) throws SQLException { + String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); + PreparedStatement find_jobs_stmt = conn.prepareStatement(query); + int index = 1; + find_jobs_stmt.setString(index++, s.getShowId()); + find_jobs_stmt.setString(index++, host.getFacilityId()); + for (String item : host.getOs()) { + find_jobs_stmt.setString(index++, item); } + find_jobs_stmt.setInt(index++, host.idleCores); + find_jobs_stmt.setLong(index++, host.idleMemory); + find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); + find_jobs_stmt.setInt(index++, host.idleGpus); + find_jobs_stmt.setLong(index++, (host.idleGpuMemory > 0) ? 
1 : 0); + find_jobs_stmt.setLong(index++, host.idleGpuMemory); + find_jobs_stmt.setString(index++, host.getName()); + find_jobs_stmt.setInt(index++, numJobs * 10); + return find_jobs_stmt; + } + }, PKJOB_MAPPER)); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs findByShowQuery", + System.currentTimeMillis() - lastTime); + } + + // Collect metrics + prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); + if (result.size() < 1) { + if (host.gpuMemory == 0) { + s.skip(host.tags, host.idleCores, host.idleMemory); } - prometheusMetrics.setBookingDurationMetric("findDispatchJobs show loop", - System.currentTimeMillis() - loopTime); + } else { return result; - + } } - - private String findByShowQuery() { - switch (schedulingMode) { - case PRIORITY_ONLY: return FIND_JOBS_BY_SHOW_PRIORITY_MODE; - case FIFO: return FIND_JOBS_BY_SHOW_FIFO_MODE; - case BALANCED: return FIND_JOBS_BY_SHOW; - default: return FIND_JOBS_BY_SHOW_PRIORITY_MODE; - } + prometheusMetrics.setBookingDurationMetric("findDispatchJobs show loop", + System.currentTimeMillis() - loopTime); + return result; + + } + + private String findByShowQuery() { + switch (schedulingMode) { + case PRIORITY_ONLY: + return FIND_JOBS_BY_SHOW_PRIORITY_MODE; + case FIFO: + return FIND_JOBS_BY_SHOW_FIFO_MODE; + case BALANCED: + return FIND_JOBS_BY_SHOW; + default: + return FIND_JOBS_BY_SHOW_PRIORITY_MODE; } - - private String findByGroupQuery() { - switch (schedulingMode) { - case PRIORITY_ONLY: return FIND_JOBS_BY_GROUP_PRIORITY_MODE; - case FIFO: return FIND_JOBS_BY_GROUP_FIFO_MODE; - case BALANCED: return FIND_JOBS_BY_GROUP_BALANCED_MODE; - default: return FIND_JOBS_BY_GROUP_PRIORITY_MODE; - } + } + + private String findByGroupQuery() { + switch (schedulingMode) { + case PRIORITY_ONLY: + return FIND_JOBS_BY_GROUP_PRIORITY_MODE; + case FIFO: + return FIND_JOBS_BY_GROUP_FIFO_MODE; + case BALANCED: + return FIND_JOBS_BY_GROUP_BALANCED_MODE; + default: + return FIND_JOBS_BY_GROUP_PRIORITY_MODE; } - - @Override - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { - return findDispatchJobs(host, numJobs, true); + } + + @Override + public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { + return findDispatchJobs(host, numJobs, true); + } + + @Override + public Set findDispatchJobs(DispatchHost host, int numJobs) { + return findDispatchJobs(host, numJobs, false); + } + + @Override + public Set findDispatchJobs(DispatchHost host, GroupInterface g) { + LinkedHashSet result = new LinkedHashSet(5); + long lastTime = System.currentTimeMillis(); + + if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { + String query = handleInClause("str_os", FIND_JOBS_BY_GROUP_NO_GPU, host.getOs().length); + ArrayList args = new ArrayList(); + + args.add(g.getGroupId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.getName()); + args.add(50); + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group nogpu query", + System.currentTimeMillis() - lastTime); + } else { + String query = handleInClause("str_os", findByGroupQuery(), host.getOs().length); + ArrayList args = new ArrayList(); + + args.add(g.getGroupId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + 
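+      // Positional bind values: the order of these adds must match the '?' placeholders in the
+      // query string expanded by handleInClause.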
args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.idleGpus); + args.add(host.idleGpuMemory > 0 ? 1 : 0); + args.add(host.idleGpuMemory); + args.add(host.getName()); + args.add(50); + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group query", + System.currentTimeMillis() - lastTime); } - - @Override - public Set findDispatchJobs(DispatchHost host, int numJobs) { - return findDispatchJobs(host, numJobs, false); + return result; + } + + @Override + public List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit) { + long lastTime = System.currentTimeMillis(); + List frames; + if (proc.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, proc.gpuMemoryReserved, + job.getJobId(), limit); + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_JOB_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, + proc.gpusReserved, (proc.gpuMemoryReserved > 0) ? 1 : 0, proc.gpuMemoryReserved, + job.getJobId(), proc.hostName, job.getJobId(), limit); } - @Override - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { - LinkedHashSet result = new LinkedHashSet(5); - long lastTime = System.currentTimeMillis(); - - if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { - String query = handleInClause("str_os", FIND_JOBS_BY_GROUP_NO_GPU, host.getOs().length); - ArrayList args = new ArrayList(); - - args.add(g.getGroupId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.getName()); - args.add(50); - result.addAll(getJdbcTemplate().query( - query, - PKJOB_MAPPER, args.toArray())); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group nogpu query", - System.currentTimeMillis() - lastTime); - } - else { - String query = handleInClause("str_os", findByGroupQuery(), host.getOs().length); - ArrayList args = new ArrayList(); + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and proc query", + System.currentTimeMillis() - lastTime); - args.add(g.getGroupId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.idleGpus); - args.add(host.idleGpuMemory > 0 ? 
1 : 0); - args.add(host.idleGpuMemory); - args.add(host.getName()); - args.add(50); - result.addAll(getJdbcTemplate().query( - query, - PKJOB_MAPPER, args.toArray())); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group query", - System.currentTimeMillis() - lastTime); - } - return result; - } + return frames; + } - @Override - public List findNextDispatchFrames(JobInterface job, - VirtualProc proc, int limit) { - long lastTime = System.currentTimeMillis(); - List frames; - if (proc.isLocalDispatch) { - frames = getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.memoryReserved, - proc.gpuMemoryReserved, - job.getJobId(), - limit); - } - else { - frames = getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_JOB_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.coresReserved, - proc.memoryReserved, - proc.gpusReserved, - (proc.gpuMemoryReserved > 0) ? 1 : 0, proc.gpuMemoryReserved, - job.getJobId(), proc.hostName, - job.getJobId(), limit); - } + @Override + public List findNextDispatchFrames(JobInterface job, DispatchHost host, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and proc query", - System.currentTimeMillis() - lastTime); + if (host.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleMemory, host.idleGpuMemory, job.getJobId(), + limit); - return frames; + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_JOB_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, + threadMode(host.threadMode), host.idleGpus, (host.idleGpuMemory > 0) ? 1 : 0, + host.idleGpuMemory, job.getJobId(), host.getName(), job.getJobId(), limit); } - - @Override - public List findNextDispatchFrames(JobInterface job, - DispatchHost host, int limit) { - long lastTime = System.currentTimeMillis(); - List frames; - - if (host.isLocalDispatch) { - frames = getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleMemory, host.idleGpuMemory, job.getJobId(), - limit); - - } else { - frames = getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_JOB_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - host.idleGpus, - (host.idleGpuMemory > 0) ? 
1 : 0, host.idleGpuMemory, - job.getJobId(), host.getName(), - job.getJobId(), limit); - } - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and host query", - System.currentTimeMillis() - lastTime); - - return frames; + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and host query", + System.currentTimeMillis() - lastTime); + + return frames; + } + + @Override + public List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; + + if (proc.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, proc.gpuMemoryReserved, + layer.getLayerId(), limit); + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, + proc.gpusReserved, proc.gpuMemoryReserved, layer.getLayerId(), layer.getLayerId(), + proc.hostName, limit); } + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and proc query", + System.currentTimeMillis() - lastTime); - @Override - public List findNextDispatchFrames(LayerInterface layer, - VirtualProc proc, int limit) { - long lastTime = System.currentTimeMillis(); - List frames; - - if (proc.isLocalDispatch) { - frames = getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.memoryReserved, proc.gpuMemoryReserved, - layer.getLayerId(), - limit); - } - else { - frames = getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.coresReserved, proc.memoryReserved, - proc.gpusReserved, proc.gpuMemoryReserved, - layer.getLayerId(), layer.getLayerId(), - proc.hostName, limit); - } + return frames; + } + @Override + public List findNextDispatchFrames(LayerInterface layer, DispatchHost host, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and proc query", - System.currentTimeMillis() - lastTime); + if (host.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleMemory, host.idleGpuMemory, + layer.getLayerId(), limit); - return frames; + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, + threadMode(host.threadMode), host.idleGpus, host.idleGpuMemory, layer.getLayerId(), + layer.getLayerId(), host.getName(), limit); } - @Override - public List findNextDispatchFrames(LayerInterface layer, - DispatchHost host, int limit) { - long lastTime = System.currentTimeMillis(); - List frames; - - if (host.isLocalDispatch) { - frames = getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleMemory, host.idleGpuMemory, layer.getLayerId(), - limit); - - } else { - frames = getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - host.idleGpus, host.idleGpuMemory, layer.getLayerId(), layer.getLayerId(), - host.getName(), limit); - } - - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and host query", - System.currentTimeMillis() - 
lastTime); - - return frames; - } - - - @Override - public DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc) { - return findNextDispatchFrames(job, proc, 1).get(0); + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and host query", + System.currentTimeMillis() - lastTime); + + return frames; + } + + @Override + public DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc) { + return findNextDispatchFrames(job, proc, 1).get(0); + } + + @Override + public DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host) { + return findNextDispatchFrames(job, host, 1).get(0); + } + + @Override + public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { + long start = System.currentTimeMillis(); + try { + return getJdbcTemplate().queryForObject(FIND_UNDER_PROCED_JOB_BY_FACILITY, Integer.class, + excludeJob.getShowId(), proc.getFacilityId(), proc.os, excludeJob.getShowId(), + proc.getFacilityId(), proc.os, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved, proc.hostName) > 0; + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + return false; + } finally { + prometheusMetrics.setBookingDurationMetric("findUnderProcedJob query", + System.currentTimeMillis() - start); } - - @Override - public DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host) { - return findNextDispatchFrames(job, host, 1).get(0); + } + + @Override + public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { + long start = System.currentTimeMillis(); + try { + return getJdbcTemplate().queryForObject(HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS, Boolean.class, + baseJob.priority, proc.getFacilityId(), proc.os, proc.getFacilityId(), proc.os, + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.hostName); + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + return false; + } finally { + prometheusMetrics.setBookingDurationMetric("higherPriorityJobExists query", + System.currentTimeMillis() - start); } - - @Override - public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { - long start = System.currentTimeMillis(); - try { - return getJdbcTemplate().queryForObject( - FIND_UNDER_PROCED_JOB_BY_FACILITY, - Integer.class, excludeJob.getShowId(), proc.getFacilityId(), - proc.os, excludeJob.getShowId(), - proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.hostName) > 0; - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - return false; - } - finally { - prometheusMetrics.setBookingDurationMetric("findUnderProcedJob query", - System.currentTimeMillis() - start); - } + } + + @Override + public Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { + LinkedHashSet result = new LinkedHashSet(numJobs); + long start = System.currentTimeMillis(); + if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { + String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); + ArrayList args = new ArrayList(); + args.add(show.getShowId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.getName()); + args.add(numJobs * 10); + + result.addAll(getJdbcTemplate().query(query, 
PKJOB_MAPPER, args.toArray())); + + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show nogpu query", + System.currentTimeMillis() - start); + } else { + String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); + ArrayList args = new ArrayList(); + args.add(show.getShowId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.idleGpus); + args.add(host.idleGpuMemory > 0 ? 1 : 0); + args.add(host.idleGpuMemory); + args.add(host.getName()); + args.add(numJobs * 10); + + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show query", + System.currentTimeMillis() - start); } - @Override - public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { - long start = System.currentTimeMillis(); - try { - return getJdbcTemplate().queryForObject( - HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS, - Boolean.class, baseJob.priority, proc.getFacilityId(), - proc.os, proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.hostName); - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - return false; - } - finally { - prometheusMetrics.setBookingDurationMetric("higherPriorityJobExists query", - System.currentTimeMillis() - start); - } + // Collect metrics + prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); + return result; + } + + @Override + public Set findLocalDispatchJobs(DispatchHost host) { + LinkedHashSet result = new LinkedHashSet(5); + long start = System.currentTimeMillis(); + + String query = handleInClause("str_os", FIND_JOBS_BY_LOCAL, host.getOs().length); + ArrayList args = new ArrayList(); + args.add(host.getHostId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); } - - @Override - public Set findDispatchJobs(DispatchHost host, - ShowInterface show, int numJobs) { - LinkedHashSet result = new LinkedHashSet(numJobs); - long start = System.currentTimeMillis(); - if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { - String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); - ArrayList args = new ArrayList(); - args.add(show.getShowId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.getName()); - args.add(numJobs * 10); - - result.addAll(getJdbcTemplate().query( - query, - PKJOB_MAPPER, args.toArray())); - - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show nogpu query", - System.currentTimeMillis() - start); - } - else { - String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); - ArrayList args = new ArrayList(); - args.add(show.getShowId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.idleGpus); - args.add(host.idleGpuMemory > 0 ? 
1 : 0); - args.add(host.idleGpuMemory); - args.add(host.getName()); - args.add(numJobs * 10); - - result.addAll(getJdbcTemplate().query( - query, - PKJOB_MAPPER, args.toArray())); - - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show query", - System.currentTimeMillis() - start); - } - - // Collect metrics - prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); - return result; + args.add(host.getHostId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); } - @Override - public Set findLocalDispatchJobs(DispatchHost host) { - LinkedHashSet result = new LinkedHashSet(5); - long start = System.currentTimeMillis(); - - String query = handleInClause("str_os", FIND_JOBS_BY_LOCAL, host.getOs().length); - ArrayList args = new ArrayList(); - args.add(host.getHostId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.getHostId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); - result.addAll(getJdbcTemplate().query( - query, - PKJOB_MAPPER, args.toArray())); + prometheusMetrics.setBookingDurationMetric("findLocalDispatchJobs query", + System.currentTimeMillis() - start); + return result; + } - prometheusMetrics.setBookingDurationMetric("findLocalDispatchJobs query", - System.currentTimeMillis() - start); - return result; - } - - @Override - public void clearCache() { - bookableShows.clear(); - } + @Override + public void clearCache() { + bookableShows.clear(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java index 863b04225..3114798ef 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -32,57 +28,52 @@ public class FacilityDaoJdbc extends JdbcDaoSupport implements FacilityDao { - public static final RowMapper FACILITY_MAPPER = new RowMapper() { + public static final RowMapper FACILITY_MAPPER = + new RowMapper() { public FacilityInterface mapRow(ResultSet rs, int rowNum) throws SQLException { - FacilityEntity facility = new FacilityEntity(); - facility.id = rs.getString("pk_facility"); - facility.name = rs.getString("str_name"); - return facility; + FacilityEntity facility = new FacilityEntity(); + facility.id = rs.getString("pk_facility"); + facility.name = rs.getString("str_name"); + return facility; } - }; - - public FacilityInterface getDefaultFacility() { - return getJdbcTemplate().queryForObject( - "SELECT pk_facility,str_name FROM facility WHERE b_default=true LIMIT 1", - FACILITY_MAPPER); - } - - public FacilityInterface getFacility(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_facility, str_name FROM facility WHERE pk_facility=? " + - "OR str_name=?", FACILITY_MAPPER, id, id); - } - - public boolean facilityExists(String name) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM facility WHERE str_name=?", - Integer.class, name) > 0; - - } - - public FacilityInterface insertFacility(FacilityEntity facility) { - facility.id = SqlUtil.genKeyRandom(); - - getJdbcTemplate().update( - "INSERT INTO facility (pk_facility, str_name) VALUES (?,?)", - facility.getId(), facility.getName()); - - return facility; - } - - @Override - public int deleteFacility(FacilityInterface facility) { - return getJdbcTemplate().update( - "DELETE FROM facility WHERE pk_facility = ?", - facility.getFacilityId()); - } - - @Override - public int updateFacilityName(FacilityInterface facility, String name) { - return getJdbcTemplate().update( - "UPDATE facility SET str_name=? WHERE pk_facility = ?", - name, facility.getFacilityId()); - } + }; -} + public FacilityInterface getDefaultFacility() { + return getJdbcTemplate().queryForObject( + "SELECT pk_facility,str_name FROM facility WHERE b_default=true LIMIT 1", FACILITY_MAPPER); + } + + public FacilityInterface getFacility(String id) { + return getJdbcTemplate().queryForObject( + "SELECT pk_facility, str_name FROM facility WHERE pk_facility=? " + "OR str_name=?", + FACILITY_MAPPER, id, id); + } + + public boolean facilityExists(String name) { + return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM facility WHERE str_name=?", + Integer.class, name) > 0; + + } + public FacilityInterface insertFacility(FacilityEntity facility) { + facility.id = SqlUtil.genKeyRandom(); + + getJdbcTemplate().update("INSERT INTO facility (pk_facility, str_name) VALUES (?,?)", + facility.getId(), facility.getName()); + + return facility; + } + + @Override + public int deleteFacility(FacilityInterface facility) { + return getJdbcTemplate().update("DELETE FROM facility WHERE pk_facility = ?", + facility.getFacilityId()); + } + + @Override + public int updateFacilityName(FacilityInterface facility, String name) { + return getJdbcTemplate().update("UPDATE facility SET str_name=? 
WHERE pk_facility = ?", name, + facility.getFacilityId()); + } + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java index ee30d5f8e..5e4043227 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.CallableStatement; @@ -39,161 +35,118 @@ import com.imageworks.spcue.util.SqlUtil; /** - * A DAO class for loading Filters, Actions, and Matchers. Part of the - * job filtering system. + * A DAO class for loading Filters, Actions, and Matchers. Part of the job filtering system. * * @category DAO */ public class FilterDaoJdbc extends JdbcDaoSupport implements FilterDao { - private static final String GET_FILTER = - "SELECT " + - "filter.* " + - "FROM "+ - "filter "; - - private static final String GET_ACTIVE_FILTERS = - "SELECT " + - "filter.* " + - "FROM " + - "filter " + - "WHERE " + - "b_enabled = true "+ - "AND " + - "pk_show=? " + - "ORDER BY " + - "f_order ASC"; - - private static final String GET_FILTERS = - "SELECT " + - "filter.* " + - "FROM " + - "filter " + - "WHERE " + - "pk_show=? 
" + - "ORDER BY " + - "f_order ASC"; - - public static final RowMapper FILTER_DETAIL_MAPPER = new RowMapper() { - public FilterEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - FilterEntity d = new FilterEntity(); - d.type = FilterType.valueOf(rs.getString("str_type")); - d.id = rs.getString("pk_filter"); - d.name = rs.getString("str_name"); - d.showId = rs.getString("pk_show"); - d.enabled = rs.getBoolean("b_enabled"); - d.order = rs.getFloat("f_order"); - return d; - } - }; - - public List getActiveFilters(ShowInterface show) { - return getJdbcTemplate().query( - GET_ACTIVE_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); - } - - public List getFilters(ShowInterface show) { - return getJdbcTemplate().query( - GET_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); - } - - public void deleteFilter(FilterInterface f) { - getJdbcTemplate().update( - "DELETE FROM action WHERE pk_filter=?",f.getFilterId()); - getJdbcTemplate().update( - "DELETE FROM matcher WHERE pk_filter=?",f.getFilterId()); - getJdbcTemplate().update( - "DELETE FROM filter WHERE pk_filter=?",f.getFilterId()); - reorderFilters(f); - } - - private static final String INSERT_FILTER = - "INSERT INTO " + - "filter "+ - "(" + - "pk_filter," + - "pk_show,"+ - "str_name,"+ - "str_type,"+ - "f_order "+ - ") VALUES (?,?,?,?,(SELECT COALESCE(MAX(f_order)+1,1) FROM filter WHERE pk_show=?))"; - - public void insertFilter(FilterEntity f) { - f.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_FILTER, - f.id, f.getShowId(),f.name, f.type.toString(), f.getShowId()); - reorderFilters(f); - } + private static final String GET_FILTER = "SELECT " + "filter.* " + "FROM " + "filter "; - public void updateSetFilterEnabled(FilterInterface f, boolean enabled) { - getJdbcTemplate().update( - "UPDATE filter SET b_enabled=? WHERE pk_filter=?", - enabled, f.getFilterId()); - } + private static final String GET_ACTIVE_FILTERS = "SELECT " + "filter.* " + "FROM " + "filter " + + "WHERE " + "b_enabled = true " + "AND " + "pk_show=? " + "ORDER BY " + "f_order ASC"; - public void updateSetFilterName(FilterInterface f, String name) { - getJdbcTemplate().update( - "UPDATE filter SET str_name=? WHERE pk_filter=?", - name, f.getFilterId()); - } + private static final String GET_FILTERS = "SELECT " + "filter.* " + "FROM " + "filter " + "WHERE " + + "pk_show=? " + "ORDER BY " + "f_order ASC"; - public void updateSetFilterOrder(FilterInterface f, double order) { - getJdbcTemplate().update( - "UPDATE filter SET f_order=? - 0.1 WHERE pk_filter=?", - order, f.getFilterId()); - reorderFilters(f); - } - - public void lowerFilterOrder(FilterInterface f, int by) { - double lower_by = by + 0.1; - getJdbcTemplate().update( - "UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", - lower_by, f.getFilterId()); - reorderFilters(f); - } - - public void raiseFilterOrder(FilterInterface f, int by) { - double raise_by = (by * -1) - 0.1; - getJdbcTemplate().update( - "UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", - raise_by, f.getFilterId()); - reorderFilters(f); - } - - public void updateSetFilterType(FilterInterface f, FilterType type) { - getJdbcTemplate().update( - "UPDATE filter SET str_type=? 
WHERE pk_filter=?", - type.toString(), f.getFilterId()); - } - - public void reorderFilters(final ShowInterface s) { - getJdbcTemplate().update("LOCK TABLE filter IN SHARE MODE"); - getJdbcTemplate().call(new CallableStatementCreator() { - - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call reorder_filters(?) }"); - c.setString(1, s.getShowId()); - return c; - } - }, new ArrayList()); - } - - public FilterEntity findFilter(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " WHERE pk_show=? AND str_name=?", - FILTER_DETAIL_MAPPER, show.getShowId(), name); - } - - public FilterEntity getFilter(String id) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " WHERE pk_filter=?", - FILTER_DETAIL_MAPPER, id); - } - - public FilterEntity getFilter(FilterInterface filter) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " WHERE pk_filter=?", - FILTER_DETAIL_MAPPER, filter.getFilterId()); + public static final RowMapper FILTER_DETAIL_MAPPER = new RowMapper() { + public FilterEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + FilterEntity d = new FilterEntity(); + d.type = FilterType.valueOf(rs.getString("str_type")); + d.id = rs.getString("pk_filter"); + d.name = rs.getString("str_name"); + d.showId = rs.getString("pk_show"); + d.enabled = rs.getBoolean("b_enabled"); + d.order = rs.getFloat("f_order"); + return d; } + }; + + public List getActiveFilters(ShowInterface show) { + return getJdbcTemplate().query(GET_ACTIVE_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); + } + + public List getFilters(ShowInterface show) { + return getJdbcTemplate().query(GET_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); + } + + public void deleteFilter(FilterInterface f) { + getJdbcTemplate().update("DELETE FROM action WHERE pk_filter=?", f.getFilterId()); + getJdbcTemplate().update("DELETE FROM matcher WHERE pk_filter=?", f.getFilterId()); + getJdbcTemplate().update("DELETE FROM filter WHERE pk_filter=?", f.getFilterId()); + reorderFilters(f); + } + + private static final String INSERT_FILTER = "INSERT INTO " + "filter " + "(" + "pk_filter," + + "pk_show," + "str_name," + "str_type," + "f_order " + + ") VALUES (?,?,?,?,(SELECT COALESCE(MAX(f_order)+1,1) FROM filter WHERE pk_show=?))"; + + public void insertFilter(FilterEntity f) { + f.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_FILTER, f.id, f.getShowId(), f.name, f.type.toString(), + f.getShowId()); + reorderFilters(f); + } + + public void updateSetFilterEnabled(FilterInterface f, boolean enabled) { + getJdbcTemplate().update("UPDATE filter SET b_enabled=? WHERE pk_filter=?", enabled, + f.getFilterId()); + } + + public void updateSetFilterName(FilterInterface f, String name) { + getJdbcTemplate().update("UPDATE filter SET str_name=? WHERE pk_filter=?", name, + f.getFilterId()); + } + + public void updateSetFilterOrder(FilterInterface f, double order) { + getJdbcTemplate().update("UPDATE filter SET f_order=? - 0.1 WHERE pk_filter=?", order, + f.getFilterId()); + reorderFilters(f); + } + + public void lowerFilterOrder(FilterInterface f, int by) { + double lower_by = by + 0.1; + getJdbcTemplate().update("UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", lower_by, + f.getFilterId()); + reorderFilters(f); + } + + public void raiseFilterOrder(FilterInterface f, int by) { + double raise_by = (by * -1) - 0.1; + getJdbcTemplate().update("UPDATE filter SET f_order=f_order + ? 
WHERE pk_filter=?", raise_by, + f.getFilterId()); + reorderFilters(f); + } + + public void updateSetFilterType(FilterInterface f, FilterType type) { + getJdbcTemplate().update("UPDATE filter SET str_type=? WHERE pk_filter=?", type.toString(), + f.getFilterId()); + } + + public void reorderFilters(final ShowInterface s) { + getJdbcTemplate().update("LOCK TABLE filter IN SHARE MODE"); + getJdbcTemplate().call(new CallableStatementCreator() { + + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call reorder_filters(?) }"); + c.setString(1, s.getShowId()); + return c; + } + }, new ArrayList()); + } + + public FilterEntity findFilter(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_show=? AND str_name=?", + FILTER_DETAIL_MAPPER, show.getShowId(), name); + } + + public FilterEntity getFilter(String id) { + return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_filter=?", FILTER_DETAIL_MAPPER, + id); + } + + public FilterEntity getFilter(FilterInterface filter) { + return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_filter=?", FILTER_DETAIL_MAPPER, + filter.getFilterId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java index c25e00874..b4a4de4fa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -57,1146 +53,770 @@ import com.imageworks.spcue.util.FrameSet; import com.imageworks.spcue.util.SqlUtil; -public class FrameDaoJdbc extends JdbcDaoSupport implements FrameDao { - - private static final String UPDATE_FRAME_STOPPED_NORSS = - "UPDATE "+ - "frame "+ - "SET " + - "str_state=?, "+ - "int_exit_status = ?, " + - "ts_stopped = current_timestamp, " + - "ts_updated = current_timestamp, " + - "int_version = int_version + 1, " + - "int_total_past_core_time = int_total_past_core_time + " + - "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_cores / 100)," + - "int_total_past_gpu_time = int_total_past_gpu_time + " + - "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_gpus) " + - "WHERE " + - "frame.pk_frame = ? " + - "AND " + - "frame.str_state = ? " + - "AND " + - "frame.int_version = ? "; - - @Override - public boolean updateFrameStopped(FrameInterface frame, FrameState state, - int exitStatus) { - return getJdbcTemplate().update(UPDATE_FRAME_STOPPED_NORSS, - state.toString(), exitStatus, frame.getFrameId(), - FrameState.RUNNING.toString(), frame.getVersion()) == 1; - } - - private static final String UPDATE_FRAME_STOPPED = - "UPDATE "+ - "frame "+ - "SET " + - "str_state=?, "+ - "int_exit_status = ?, " + - "ts_stopped = current_timestamp + interval '1' second, " + - "ts_updated = current_timestamp, " + - "int_mem_max_used = ?, " + - "int_version = int_version + 1, " + - "int_total_past_core_time = int_total_past_core_time + " + - "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_cores / 100), " + - "int_total_past_gpu_time = int_total_past_gpu_time + " + - "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_gpus) " + - "WHERE " + - "frame.pk_frame = ? " + - "AND " + - "frame.str_state = ? " + - "AND " + - "frame.int_version = ? "; - - @Override - public boolean updateFrameStopped(FrameInterface frame, FrameState state, - int exitStatus, long maxRss) { - - - return getJdbcTemplate().update(UPDATE_FRAME_STOPPED, - state.toString(), exitStatus, maxRss, - frame.getFrameId(), FrameState.RUNNING.toString(), - frame.getVersion()) == 1; - } - - private static final String UPDATE_FRAME_REASON = - "UPDATE "+ - "frame "+ - "SET " + - "str_state = ?, "+ - "int_exit_status = ?, " + - "ts_stopped = current_timestamp, " + - "ts_updated = current_timestamp, " + - "int_version = int_version + 1 " + - "WHERE " + - "frame.pk_frame = ? " + - "AND " + - "frame.pk_frame NOT IN " + - "(SELECT proc.pk_frame FROM " + - "proc WHERE proc.pk_frame=?)"; - - private int updateFrame(FrameInterface frame, int exitStatus) { - - int result = getJdbcTemplate().update( - UPDATE_FRAME_REASON, - FrameState.WAITING.toString(), - exitStatus, - frame.getFrameId(), - frame.getFrameId()); - - return result; - } - - @Override - public boolean updateFrameHostDown(FrameInterface frame) { - return updateFrame(frame, Dispatcher.EXIT_STATUS_DOWN_HOST) > 0; - } - - @Override - public boolean updateFrameCleared(FrameInterface frame) { - return updateFrame(frame, Dispatcher.EXIT_STATUS_FRAME_CLEARED) > 0; - } - - private static final String UPDATE_FRAME_MEMORY_ERROR = - "UPDATE "+ - "frame "+ - "SET " + - "int_exit_status = ?, " + - "int_version = int_version + 1 " + - "WHERE " + - "frame.pk_frame = ? 
"; - @Override - public boolean updateFrameMemoryError(FrameInterface frame) { - int result = getJdbcTemplate().update( - UPDATE_FRAME_MEMORY_ERROR, - Dispatcher.EXIT_STATUS_MEMORY_FAILURE, - frame.getFrameId()); - - return result > 0; - } - - private static final String UPDATE_FRAME_STARTED = - "UPDATE " + - "frame " + - "SET " + - "str_state = ?, " + - "str_host = ?, " + - "int_cores = ?, " + - "int_mem_reserved = ?, " + - "int_gpus = ?, " + - "int_gpu_mem_reserved = ?, " + - "ts_updated = current_timestamp, " + - "ts_started = current_timestamp, " + - "ts_stopped = null, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "str_state = ? " + - "AND " + - "int_version = ? " + - "AND " + - "frame.pk_layer IN (" + - "SELECT " + - "layer.pk_layer " + - "FROM " + - "layer " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = layer.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ")"; - - private static final String UPDATE_FRAME_RETRIES = - "UPDATE " + - "frame " + - "SET " + - "int_retries = int_retries + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_exit_status NOT IN (?,?,?,?,?,?,?) "; - - @Override - public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { - - lockFrameForUpdate(frame, FrameState.WAITING); - - try { - int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, - FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, - proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, frame.getFrameId(), - FrameState.WAITING.toString(), frame.getVersion()); - if (result == 0) { - String error_msg = "the frame " + - frame + " was updated by another thread."; - throw new FrameReservationException(error_msg); - } - } catch (DataAccessException e) { - /* - * This usually happens when the folder's max cores - * limit has exceeded - */ - throw new FrameReservationException(e.getCause()); - } - - /* - * Frames that were killed via nimby or hardware errors not attributed to - * the software do not increment the retry counter. Like failed launch, - * orphaned frame, failed kill or down host. - */ - try { - getJdbcTemplate().update(UPDATE_FRAME_RETRIES, - frame.getFrameId(), -1, FrameExitStatus.SKIP_RETRY_VALUE, - FrameExitStatus.FAILED_LAUNCH_VALUE, Dispatcher.EXIT_STATUS_FRAME_CLEARED, - Dispatcher.EXIT_STATUS_FRAME_ORPHAN, Dispatcher.EXIT_STATUS_FAILED_KILL, - Dispatcher.EXIT_STATUS_DOWN_HOST); - } catch (DataAccessException e) { - throw new FrameReservationException(e.getCause()); - } - } - - private static final String UPDATE_FRAME_FIXED = - "UPDATE "+ - "frame "+ - "SET " + - "str_state = ?,"+ - "str_host=?, " + - "int_cores=?, "+ - "int_mem_reserved = ?, " + - "int_gpus = ?, " + - "int_gpu_mem_reserved = ?, " + - "ts_updated = current_timestamp, " + - "ts_started = current_timestamp, " + - "ts_stopped = null, "+ - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? 
" + - "AND " + - "str_state = 'RUNNING'"; - - @Override - public boolean updateFrameFixed(VirtualProc proc, FrameInterface frame) { - return getJdbcTemplate().update(UPDATE_FRAME_FIXED, - FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, - proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, frame.getFrameId()) == 1; - } - - @Override - public DispatchFrame getDispatchFrame(String uuid) { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_FRAME, DISPATCH_FRAME_MAPPER, uuid); - } - - static final RowMapper DISPATCH_FRAME_MAPPER = new RowMapper() { - public DispatchFrame mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchFrame frame = new DispatchFrame(); - frame.id = rs.getString("pk_frame"); - frame.name = rs.getString("frame_name"); - frame.layerId = rs.getString("pk_layer"); - frame.jobId = rs.getString("pk_job"); - frame.showId = rs.getString("pk_show"); - frame.facilityId = rs.getString("pk_facility"); - frame.retries = rs.getInt("int_retries"); - frame.state = FrameState.valueOf(rs.getString("frame_state")); - frame.command = rs.getString("str_cmd"); - frame.jobName = rs.getString("job_name"); - frame.layerName = rs.getString("layer_name"); - frame.chunkSize = rs.getInt("int_chunk_size"); - frame.range = rs.getString("str_range"); - frame.logDir = rs.getString("str_log_dir"); - frame.shot = rs.getString("str_shot"); - frame.show = rs.getString("show_name"); - frame.owner = rs.getString("str_user"); - int uid = rs.getInt("int_uid"); - frame.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); - frame.state = FrameState.valueOf(rs.getString("frame_state")); - frame.minCores = rs.getInt("int_cores_min"); - frame.maxCores = rs.getInt("int_cores_max"); - frame.threadable = rs.getBoolean("b_threadable"); - frame.setMinMemory(rs.getLong("int_mem_min")); - frame.minGpus = rs.getInt("int_gpus_min"); - frame.maxGpus = rs.getInt("int_gpus_max"); - frame.minGpuMemory = rs.getLong("int_gpu_mem_min"); - frame.version = rs.getInt("int_version"); - frame.services = rs.getString("str_services"); - frame.os = rs.getString("str_os"); - return frame; - } - }; - - private static final String GET_DISPATCH_FRAME = - "SELECT " + - "show.str_name AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "COALESCE(str_os, '') AS str_os, " + - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - "frame.int_version, " + - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.str_cmd, "+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.b_threadable,"+ - "layer.int_mem_min, "+ - "layer.int_gpus_min,"+ - "layer.int_gpus_max,"+ - "layer.int_gpu_mem_min, "+ - "layer.str_range, "+ - "layer.int_chunk_size, " + - "layer.str_services " + - "FROM " + - "layer, " + - "job, "+ - "show, " + - "frame LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + - "WHERE " + - "job.pk_show = show.pk_show "+ - "AND " + - "frame.pk_job = job.pk_job " + - "AND " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_frame = ?"; - - private static final String GET_FRAME_DETAIL = - "SELECT " + - "frame.*, " + - "job.pk_facility," + - "job.pk_show " + - "FROM " + - "frame," + - "layer," + - "job," + - "show " + - "WHERE "+ - "frame.pk_job = job.pk_job " + - "AND " + - "frame.pk_layer = layer.pk_layer " + - 
"AND "+ - "job.pk_show = show.pk_show "; - - private static final String GET_MINIMAL_FRAME = - "SELECT " + - "frame.pk_frame," + - "frame.str_name, " + - "frame.pk_job, " + - "frame.pk_layer, "+ - "frame.str_state, " + - "frame.int_version, "+ - "job.pk_show, " + - "job.pk_facility "+ - "FROM " + - "frame," + - "layer," + - "job," + - "show " + - "WHERE "+ - "frame.pk_job = job.pk_job " + - "AND " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "job.pk_show = show.pk_show "; - - private static final RowMapper FRAME_MAPPER = - new RowMapper() { - public FrameEntity mapRow(ResultSet rs, - int rowNum) throws SQLException { - FrameEntity frame = new FrameEntity(); - frame.id = rs.getString("pk_frame"); - frame.name = rs.getString("str_name"); - frame.jobId = rs.getString("pk_job"); - frame.layerId = rs.getString("pk_layer"); - frame.showId = rs.getString("pk_show"); - frame.facilityId = rs.getString("pk_facility"); - frame.version = rs.getInt("int_version"); - return frame; - } - }; - - private static final RowMapper FRAME_DETAIL_MAPPER = new RowMapper() { - public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameDetail frame = new FrameDetail(); - frame.id = rs.getString("pk_frame"); - frame.dependCount = rs.getInt("int_depend_count"); - frame.exitStatus = rs.getInt("int_exit_status"); - frame.jobId = rs.getString("pk_job"); - frame.layerId = rs.getString("pk_layer"); - frame.showId = rs.getString("pk_show"); - frame.maxRss = rs.getLong("int_mem_max_used"); - frame.name = rs.getString("str_name"); - frame.number = rs.getInt("int_number"); - frame.dispatchOrder = rs.getInt("int_dispatch_order"); - frame.retryCount = rs.getInt("int_retries"); - frame.dateStarted = rs.getTimestamp("ts_started"); - frame.dateStopped = rs.getTimestamp("ts_stopped"); - frame.dateUpdated = rs.getTimestamp("ts_updated"); - frame.dateLLU = rs.getTimestamp("ts_llu"); - frame.version = rs.getInt("int_version"); - - if (rs.getString("str_host") != null) { - frame.lastResource = String.format("%s/%d/%d",rs.getString("str_host"),rs.getInt("int_cores"),rs.getInt("int_gpus")); - } - else { - frame.lastResource = ""; - } - frame.state = FrameState.valueOf(rs.getString("str_state")); - - return frame; - } - }; - - public static final String FIND_ORPHANED_FRAMES = - "SELECT " + - "frame.pk_frame, " + - "frame.pk_layer, " + - "frame.str_name, " + - "frame.int_version, " + - "job.pk_job, " + - "job.pk_show, " + - "job.pk_facility " + - "FROM " + - "frame, " + - "job " + - "WHERE " + - "job.pk_job = frame.pk_job " + - "AND " + - "frame.str_state = 'RUNNING' " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + - "AND " + - "current_timestamp - frame.ts_updated > interval '300' second"; - - @Override - public List getOrphanedFrames() { - return getJdbcTemplate().query(FIND_ORPHANED_FRAMES, - FRAME_MAPPER); - } - - private static final String IS_ORPHAN = - "SELECT " + - "COUNT(1) " + - "FROM " + - "frame " + - "WHERE " + - "frame.pk_frame = ? 
" + - "AND " + - "frame.str_state = 'RUNNING' " + - "AND " + - "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + - "AND " + - "current_timestamp - frame.ts_updated > interval '300' second"; - - @Override - public boolean isOrphan(FrameInterface frame) { - return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, - frame.getFrameId()) == 1; - } - - private static final String INSERT_FRAME = - "INSERT INTO " + - "frame " + - "("+ - "pk_frame, " + - "pk_layer, " + - "pk_job, " + - "str_name, " + - "str_state, " + - "int_number, " + - "int_dispatch_order, " + - "int_layer_order, "+ - "ts_updated, "+ - "ts_llu "+ - ") " + - "VALUES (?,?,?,?,?,?,?,?,current_timestamp,current_timestamp)"; - - @Override - public void insertFrames(LayerDetail layer, List frames) { - - int count = 0; - for (int frame: frames) { - getJdbcTemplate().update(INSERT_FRAME, - SqlUtil.genKeyRandom(), - layer.getLayerId(), - layer.getJobId(), - CueUtil.buildFrameName(layer, frame), - FrameState.SETUP.toString(), - frame, - count, - layer.dispatchOrder); - count++; - } - } - - @Override - public List getDependentFrames(LightweightDependency depend) { - - /* - * Compound depends are handled in the DependManager. - */ - - String key = null; - StringBuilder sb = new StringBuilder(4096); - sb.append(GET_MINIMAL_FRAME); - sb.append(" AND frame.int_depend_count > 0 "); - - if (EnumSet.of( - DependType.JOB_ON_JOB, - DependType.JOB_ON_LAYER, - DependType.JOB_ON_FRAME).contains(depend.type)) { - sb.append("AND job.pk_job = ?"); - key = depend.dependErJobId; - } - else if (EnumSet.of( - DependType.LAYER_ON_FRAME, - DependType.LAYER_ON_LAYER, - DependType.LAYER_ON_JOB).contains(depend.type)) { - sb.append("AND layer.pk_layer = ?"); - key = depend.dependErLayerId; - } - else if (EnumSet.of( - DependType.FRAME_ON_JOB, - DependType.FRAME_ON_LAYER, - DependType.FRAME_ON_FRAME).contains(depend.type)) { - sb.append("AND frame.pk_frame = ?"); - key = depend.dependErFrameId; - } - else { - return new ArrayList(1); - } - - return getJdbcTemplate().query( - sb.toString(), FRAME_MAPPER, - new Object[] { key }); - } - - @Override - public FrameInterface findFrame(LayerInterface layer, int number) { - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.pk_layer=? AND int_number=?", - FRAME_MAPPER, layer.getLayerId(), number); - } - - @Override - public FrameDetail getFrameDetail(FrameInterface frame) { - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND pk_frame=?", - FRAME_DETAIL_MAPPER, frame.getFrameId()); - } - - @Override - public FrameDetail getFrameDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND pk_frame=?", - FRAME_DETAIL_MAPPER, id); - } - - @Override - public FrameDetail findFrameDetail(JobInterface job, String name) { - //Uses C_FRAME_STR_NAME_UNQ - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND frame.str_name=? 
AND frame.pk_job=?", - FRAME_DETAIL_MAPPER, name, job.getJobId()); - } - - @Override - public List findFrameDetails(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_FRAME_DETAIL), - FRAME_DETAIL_MAPPER, r.getValuesArray()); - } - - @Override - public List findFrames(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_MINIMAL_FRAME), - FRAME_MAPPER, r.getValuesArray()); - } - - private static final String FIND_LONGEST_FRAME = - "SELECT " + - "pk_frame " + - "FROM " + - "frame, " + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_job = ? "+ - "AND " + - "str_state=? "+ - "AND " + - "layer.str_type=? " + - "ORDER BY "+ - "ts_stopped - ts_started DESC " + - "LIMIT 1"; - - @Override - public FrameDetail findLongestFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_LONGEST_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); - return getFrameDetail(pk_frame); - } - - private static final String FIND_SHORTEST_FRAME = - "SELECT " + - "pk_frame " + - "FROM " + - "frame, " + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_job = ? " + - "AND " + - "frame.str_state = ? " + - "AND " + - "layer.str_type = ? " + - "ORDER BY " + - "ts_stopped - ts_started ASC " + - "LIMIT 1"; - - @Override - public FrameDetail findShortestFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_SHORTEST_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString(),LayerType.RENDER.toString()); - return getFrameDetail(pk_frame); - } - - @Override - public FrameInterface getFrame(String id) { - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.pk_frame=?", - FRAME_MAPPER, id); - } - - @Override - public FrameInterface findFrame(JobInterface job, String name) { - //Uses C_FRAME_STR_NAME_UNQ - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.str_name=? AND frame.pk_job=?", - FRAME_MAPPER, name, job.getJobId()); - } - - @Override - public void checkRetries(FrameInterface frame) { - int max_retries = getJdbcTemplate().queryForObject( - "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, - frame.getJobId()); - - if (getJdbcTemplate().queryForObject( - "SELECT int_retries FROM frame WHERE pk_frame=?", Integer.class, - frame.getFrameId()) >= max_retries) { - getJdbcTemplate().update( - "UPDATE frame SET str_state=? WHERE pk_frame=?", - FrameState.DEAD.toString(), frame.getFrameId()); - } - } - - private static final String UPDATE_FRAME_STATE = - "UPDATE " + - "frame "+ - "SET " + - "str_state = ?, " + - "ts_updated = current_timestamp, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_version = ? 
"; - - @Override - public boolean updateFrameState(FrameInterface frame, FrameState state) { - if (getJdbcTemplate().update(UPDATE_FRAME_STATE, - state.toString(), - frame.getFrameId(), - frame.getVersion()) == 1) { - logger.info("The frame " + frame + " state changed to " + - state.toString()); - return true; - } - logger.info("Failed to change the frame " + frame + " state to " + - state.toString()); - return false; - } - - private static final String MARK_AS_WAITING = - "UPDATE " + - "frame "+ - "SET " + - "str_state=?, " + - "ts_updated = current_timestamp, " + - "ts_llu = current_timestamp, " + - "int_depend_count = 0, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_version = ? " + - "AND " + - "str_state = ? "; - - @Override - public void markFrameAsWaiting(FrameInterface frame) { - getJdbcTemplate().update( - MARK_AS_WAITING, - FrameState.WAITING.toString(), - frame.getFrameId(), - frame.getVersion(), - FrameState.DEPEND.toString()); - } - - private static final String MARK_AS_DEPEND = - "UPDATE " + - "frame "+ - "SET " + - "str_state=?, " + - "int_depend_count = ?, "+ - "ts_updated = current_timestamp, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_version = ? " + - "AND " + - "str_state = ? "; - - private static final String GET_FRAME_DEPEND_COUNT = - "SELECT " + - "COUNT(1) " + - "FROM " + - "depend " + - "WHERE " + - "( " + - "(pk_job_depend_er = ? AND str_type LIKE 'JOB#_ON%' ESCAPE '#') " + - "OR " + - "pk_layer_depend_er = ? " + - "OR " + - "pk_frame_depend_er = ? " + - ") " + - "AND " + - "depend.b_active = true " + - "AND " + - "depend.b_composite = false "; - - public void markFrameAsDepend(FrameInterface frame) { - // We need to full depend count in this case to reset the - // frames's depend count accurately. - int depend_count = getJdbcTemplate().queryForObject( - GET_FRAME_DEPEND_COUNT, Integer.class, - frame.getJobId(),frame.getLayerId(),frame.getFrameId()); - - if (depend_count > 0) { - getJdbcTemplate().update( - MARK_AS_DEPEND, - FrameState.DEPEND.toString(), - depend_count, - frame.getFrameId(), - frame.getVersion(), - FrameState.WAITING.toString()); - } - } - - private static final String FIND_HIGHEST_MEM_FRAME = - "SELECT " + - "pk_frame " + - "FROM " + - "frame " + - "WHERE " + - "pk_job = ? " + - "AND " + - "str_state = ? " + - "ORDER BY " + - "int_mem_max_used DESC " + - "LIMIT 1"; - - @Override - public FrameDetail findHighestMemoryFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_HIGHEST_MEM_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString()); - return getFrameDetail(pk_frame); - } +public class FrameDaoJdbc extends JdbcDaoSupport implements FrameDao { + + private static final String UPDATE_FRAME_STOPPED_NORSS = "UPDATE " + "frame " + "SET " + + "str_state=?, " + "int_exit_status = ?, " + "ts_stopped = current_timestamp, " + + "ts_updated = current_timestamp, " + "int_version = int_version + 1, " + + "int_total_past_core_time = int_total_past_core_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_cores / 100)," + + "int_total_past_gpu_time = int_total_past_gpu_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_gpus) " + "WHERE " + + "frame.pk_frame = ? " + "AND " + "frame.str_state = ? " + "AND " + "frame.int_version = ? 
"; + + @Override + public boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus) { + return getJdbcTemplate().update(UPDATE_FRAME_STOPPED_NORSS, state.toString(), exitStatus, + frame.getFrameId(), FrameState.RUNNING.toString(), frame.getVersion()) == 1; + } + + private static final String UPDATE_FRAME_STOPPED = "UPDATE " + "frame " + "SET " + "str_state=?, " + + "int_exit_status = ?, " + "ts_stopped = current_timestamp + interval '1' second, " + + "ts_updated = current_timestamp, " + "int_mem_max_used = ?, " + + "int_version = int_version + 1, " + "int_total_past_core_time = int_total_past_core_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_cores / 100), " + + "int_total_past_gpu_time = int_total_past_gpu_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_gpus) " + + "WHERE " + "frame.pk_frame = ? " + "AND " + "frame.str_state = ? " + "AND " + + "frame.int_version = ? "; + + @Override + public boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, + long maxRss) { + + return getJdbcTemplate().update(UPDATE_FRAME_STOPPED, state.toString(), exitStatus, maxRss, + frame.getFrameId(), FrameState.RUNNING.toString(), frame.getVersion()) == 1; + } + + private static final String UPDATE_FRAME_REASON = "UPDATE " + "frame " + "SET " + + "str_state = ?, " + "int_exit_status = ?, " + "ts_stopped = current_timestamp, " + + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " + + "frame.pk_frame = ? " + "AND " + "frame.pk_frame NOT IN " + "(SELECT proc.pk_frame FROM " + + "proc WHERE proc.pk_frame=?)"; + + private int updateFrame(FrameInterface frame, int exitStatus) { + + int result = getJdbcTemplate().update(UPDATE_FRAME_REASON, FrameState.WAITING.toString(), + exitStatus, frame.getFrameId(), frame.getFrameId()); + + return result; + } + + @Override + public boolean updateFrameHostDown(FrameInterface frame) { + return updateFrame(frame, Dispatcher.EXIT_STATUS_DOWN_HOST) > 0; + } + + @Override + public boolean updateFrameCleared(FrameInterface frame) { + return updateFrame(frame, Dispatcher.EXIT_STATUS_FRAME_CLEARED) > 0; + } + + private static final String UPDATE_FRAME_MEMORY_ERROR = + "UPDATE " + "frame " + "SET " + "int_exit_status = ?, " + "int_version = int_version + 1 " + + "WHERE " + "frame.pk_frame = ? "; + + @Override + public boolean updateFrameMemoryError(FrameInterface frame) { + int result = getJdbcTemplate().update(UPDATE_FRAME_MEMORY_ERROR, + Dispatcher.EXIT_STATUS_MEMORY_FAILURE, frame.getFrameId()); + + return result > 0; + } + + private static final String UPDATE_FRAME_STARTED = "UPDATE " + "frame " + "SET " + + "str_state = ?, " + "str_host = ?, " + "int_cores = ?, " + "int_mem_reserved = ?, " + + "int_gpus = ?, " + "int_gpu_mem_reserved = ?, " + "ts_updated = current_timestamp, " + + "ts_started = current_timestamp, " + "ts_stopped = null, " + + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " + "AND " + "str_state = ? " + + "AND " + "int_version = ? 
" + "AND " + "frame.pk_layer IN (" + "SELECT " + "layer.pk_layer " + + "FROM " + "layer " + "LEFT JOIN layer_limit ON layer_limit.pk_layer = layer.pk_layer " + + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + + "LEFT JOIN (" + "SELECT " + "limit_record.pk_limit_record, " + + "SUM(layer_stat.int_running_count) AS int_sum_running " + "FROM " + "layer_limit " + + "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + + "GROUP BY limit_record.pk_limit_record) AS sum_running " + + "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + "WHERE " + + "sum_running.int_sum_running < limit_record.int_max_value " + + "OR sum_running.int_sum_running IS NULL " + ")"; + + private static final String UPDATE_FRAME_RETRIES = + "UPDATE " + "frame " + "SET " + "int_retries = int_retries + 1 " + "WHERE " + "pk_frame = ? " + + "AND " + "int_exit_status NOT IN (?,?,?,?,?,?,?) "; + + @Override + public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { + + lockFrameForUpdate(frame, FrameState.WAITING); + + try { + int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, FrameState.RUNNING.toString(), + proc.hostName, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved, frame.getFrameId(), FrameState.WAITING.toString(), + frame.getVersion()); + if (result == 0) { + String error_msg = "the frame " + frame + " was updated by another thread."; + throw new FrameReservationException(error_msg); + } + } catch (DataAccessException e) { + /* + * This usually happens when the folder's max cores limit has exceeded + */ + throw new FrameReservationException(e.getCause()); + } + + /* + * Frames that were killed via nimby or hardware errors not attributed to the software do not + * increment the retry counter. Like failed launch, orphaned frame, failed kill or down host. + */ + try { + getJdbcTemplate().update(UPDATE_FRAME_RETRIES, frame.getFrameId(), -1, + FrameExitStatus.SKIP_RETRY_VALUE, FrameExitStatus.FAILED_LAUNCH_VALUE, + Dispatcher.EXIT_STATUS_FRAME_CLEARED, Dispatcher.EXIT_STATUS_FRAME_ORPHAN, + Dispatcher.EXIT_STATUS_FAILED_KILL, Dispatcher.EXIT_STATUS_DOWN_HOST); + } catch (DataAccessException e) { + throw new FrameReservationException(e.getCause()); + } + } + + private static final String UPDATE_FRAME_FIXED = + "UPDATE " + "frame " + "SET " + "str_state = ?," + "str_host=?, " + "int_cores=?, " + + "int_mem_reserved = ?, " + "int_gpus = ?, " + "int_gpu_mem_reserved = ?, " + + "ts_updated = current_timestamp, " + "ts_started = current_timestamp, " + + "ts_stopped = null, " + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? 
" + + "AND " + "str_state = 'RUNNING'"; + + @Override + public boolean updateFrameFixed(VirtualProc proc, FrameInterface frame) { + return getJdbcTemplate().update(UPDATE_FRAME_FIXED, FrameState.RUNNING.toString(), + proc.hostName, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved, frame.getFrameId()) == 1; + } + + @Override + public DispatchFrame getDispatchFrame(String uuid) { + return getJdbcTemplate().queryForObject(GET_DISPATCH_FRAME, DISPATCH_FRAME_MAPPER, uuid); + } + + static final RowMapper DISPATCH_FRAME_MAPPER = new RowMapper() { + public DispatchFrame mapRow(ResultSet rs, int rowNum) throws SQLException { + DispatchFrame frame = new DispatchFrame(); + frame.id = rs.getString("pk_frame"); + frame.name = rs.getString("frame_name"); + frame.layerId = rs.getString("pk_layer"); + frame.jobId = rs.getString("pk_job"); + frame.showId = rs.getString("pk_show"); + frame.facilityId = rs.getString("pk_facility"); + frame.retries = rs.getInt("int_retries"); + frame.state = FrameState.valueOf(rs.getString("frame_state")); + frame.command = rs.getString("str_cmd"); + frame.jobName = rs.getString("job_name"); + frame.layerName = rs.getString("layer_name"); + frame.chunkSize = rs.getInt("int_chunk_size"); + frame.range = rs.getString("str_range"); + frame.logDir = rs.getString("str_log_dir"); + frame.shot = rs.getString("str_shot"); + frame.show = rs.getString("show_name"); + frame.owner = rs.getString("str_user"); + int uid = rs.getInt("int_uid"); + frame.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); + frame.state = FrameState.valueOf(rs.getString("frame_state")); + frame.minCores = rs.getInt("int_cores_min"); + frame.maxCores = rs.getInt("int_cores_max"); + frame.threadable = rs.getBoolean("b_threadable"); + frame.setMinMemory(rs.getLong("int_mem_min")); + frame.minGpus = rs.getInt("int_gpus_min"); + frame.maxGpus = rs.getInt("int_gpus_max"); + frame.minGpuMemory = rs.getLong("int_gpu_mem_min"); + frame.version = rs.getInt("int_version"); + frame.services = rs.getString("str_services"); + frame.os = rs.getString("str_os"); + return frame; + } + }; + + private static final String GET_DISPATCH_FRAME = "SELECT " + "show.str_name AS show_name, " + + "job.str_name AS job_name, " + "job.pk_job," + "job.pk_show," + "job.pk_facility," + + "job.str_name," + "job.str_shot," + "job.str_user," + "job.int_uid," + "job.str_log_dir," + + "COALESCE(str_os, '') AS str_os, " + "frame.str_name AS frame_name, " + + "frame.str_state AS frame_state, " + "frame.pk_frame, " + "frame.pk_layer, " + + "frame.int_retries, " + "frame.int_version, " + "layer.str_name AS layer_name, " + + "layer.str_type AS layer_type, " + "layer.str_cmd, " + "layer.int_cores_min," + + "layer.int_cores_max," + "layer.b_threadable," + "layer.int_mem_min, " + + "layer.int_gpus_min," + "layer.int_gpus_max," + "layer.int_gpu_mem_min, " + + "layer.str_range, " + "layer.int_chunk_size, " + "layer.str_services " + "FROM " + "layer, " + + "job, " + "show, " + "frame LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + "WHERE " + + "job.pk_show = show.pk_show " + "AND " + "frame.pk_job = job.pk_job " + "AND " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_frame = ?"; + + private static final String GET_FRAME_DETAIL = + "SELECT " + "frame.*, " + "job.pk_facility," + "job.pk_show " + "FROM " + "frame," + "layer," + + "job," + "show " + "WHERE " + "frame.pk_job = job.pk_job " + "AND " + + "frame.pk_layer = layer.pk_layer " + "AND " + "job.pk_show = show.pk_show "; + + private static final 
String GET_MINIMAL_FRAME = "SELECT " + "frame.pk_frame," + "frame.str_name, " + + "frame.pk_job, " + "frame.pk_layer, " + "frame.str_state, " + "frame.int_version, " + + "job.pk_show, " + "job.pk_facility " + "FROM " + "frame," + "layer," + "job," + "show " + + "WHERE " + "frame.pk_job = job.pk_job " + "AND " + "frame.pk_layer = layer.pk_layer " + + "AND " + "job.pk_show = show.pk_show "; + + private static final RowMapper FRAME_MAPPER = new RowMapper() { + public FrameEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameEntity frame = new FrameEntity(); + frame.id = rs.getString("pk_frame"); + frame.name = rs.getString("str_name"); + frame.jobId = rs.getString("pk_job"); + frame.layerId = rs.getString("pk_layer"); + frame.showId = rs.getString("pk_show"); + frame.facilityId = rs.getString("pk_facility"); + frame.version = rs.getInt("int_version"); + return frame; + } + }; + + private static final RowMapper FRAME_DETAIL_MAPPER = new RowMapper() { + public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameDetail frame = new FrameDetail(); + frame.id = rs.getString("pk_frame"); + frame.dependCount = rs.getInt("int_depend_count"); + frame.exitStatus = rs.getInt("int_exit_status"); + frame.jobId = rs.getString("pk_job"); + frame.layerId = rs.getString("pk_layer"); + frame.showId = rs.getString("pk_show"); + frame.maxRss = rs.getLong("int_mem_max_used"); + frame.name = rs.getString("str_name"); + frame.number = rs.getInt("int_number"); + frame.dispatchOrder = rs.getInt("int_dispatch_order"); + frame.retryCount = rs.getInt("int_retries"); + frame.dateStarted = rs.getTimestamp("ts_started"); + frame.dateStopped = rs.getTimestamp("ts_stopped"); + frame.dateUpdated = rs.getTimestamp("ts_updated"); + frame.dateLLU = rs.getTimestamp("ts_llu"); + frame.version = rs.getInt("int_version"); + + if (rs.getString("str_host") != null) { + frame.lastResource = String.format("%s/%d/%d", rs.getString("str_host"), + rs.getInt("int_cores"), rs.getInt("int_gpus")); + } else { + frame.lastResource = ""; + } + frame.state = FrameState.valueOf(rs.getString("str_state")); + + return frame; + } + }; + + public static final String FIND_ORPHANED_FRAMES = + "SELECT " + "frame.pk_frame, " + "frame.pk_layer, " + "frame.str_name, " + + "frame.int_version, " + "job.pk_job, " + "job.pk_show, " + "job.pk_facility " + "FROM " + + "frame, " + "job " + "WHERE " + "job.pk_job = frame.pk_job " + "AND " + + "frame.str_state = 'RUNNING' " + "AND " + "job.str_state = 'PENDING' " + "AND " + + "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + "AND " + + "current_timestamp - frame.ts_updated > interval '300' second"; + + @Override + public List getOrphanedFrames() { + return getJdbcTemplate().query(FIND_ORPHANED_FRAMES, FRAME_MAPPER); + } + + private static final String IS_ORPHAN = "SELECT " + "COUNT(1) " + "FROM " + "frame " + "WHERE " + + "frame.pk_frame = ? 
" + "AND " + "frame.str_state = 'RUNNING' " + "AND " + + "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + "AND " + + "current_timestamp - frame.ts_updated > interval '300' second"; + + @Override + public boolean isOrphan(FrameInterface frame) { + return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, frame.getFrameId()) == 1; + } + + private static final String INSERT_FRAME = "INSERT INTO " + "frame " + "(" + "pk_frame, " + + "pk_layer, " + "pk_job, " + "str_name, " + "str_state, " + "int_number, " + + "int_dispatch_order, " + "int_layer_order, " + "ts_updated, " + "ts_llu " + ") " + + "VALUES (?,?,?,?,?,?,?,?,current_timestamp,current_timestamp)"; + + @Override + public void insertFrames(LayerDetail layer, List frames) { + + int count = 0; + for (int frame : frames) { + getJdbcTemplate().update(INSERT_FRAME, SqlUtil.genKeyRandom(), layer.getLayerId(), + layer.getJobId(), CueUtil.buildFrameName(layer, frame), FrameState.SETUP.toString(), + frame, count, layer.dispatchOrder); + count++; + } + } + + @Override + public List getDependentFrames(LightweightDependency depend) { + + /* + * Compound depends are handled in the DependManager. + */ - private static final String FIND_LOWEST_MEM_FRAME = - "SELECT " + - "pk_frame " + - "FROM " + - "frame " + - "WHERE " + - "pk_job = ? " + - "AND " + - "str_state = ? " + - "ORDER BY " + - "int_mem_max_used ASC " + - "LIMIT 1"; - - @Override - public FrameDetail findLowestMemoryFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_LOWEST_MEM_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString()); - return getFrameDetail(pk_frame); + String key = null; + StringBuilder sb = new StringBuilder(4096); + sb.append(GET_MINIMAL_FRAME); + sb.append(" AND frame.int_depend_count > 0 "); + + if (EnumSet.of(DependType.JOB_ON_JOB, DependType.JOB_ON_LAYER, DependType.JOB_ON_FRAME) + .contains(depend.type)) { + sb.append("AND job.pk_job = ?"); + key = depend.dependErJobId; + } else if (EnumSet + .of(DependType.LAYER_ON_FRAME, DependType.LAYER_ON_LAYER, DependType.LAYER_ON_JOB) + .contains(depend.type)) { + sb.append("AND layer.pk_layer = ?"); + key = depend.dependErLayerId; + } else if (EnumSet + .of(DependType.FRAME_ON_JOB, DependType.FRAME_ON_LAYER, DependType.FRAME_ON_FRAME) + .contains(depend.type)) { + sb.append("AND frame.pk_frame = ?"); + key = depend.dependErFrameId; + } else { + return new ArrayList(1); + } + + return getJdbcTemplate().query(sb.toString(), FRAME_MAPPER, new Object[] {key}); + } + + @Override + public FrameInterface findFrame(LayerInterface layer, int number) { + return getJdbcTemplate().queryForObject( + GET_MINIMAL_FRAME + " AND frame.pk_layer=? AND int_number=?", FRAME_MAPPER, + layer.getLayerId(), number); + } + + @Override + public FrameDetail getFrameDetail(FrameInterface frame) { + return getJdbcTemplate().queryForObject(GET_FRAME_DETAIL + " AND pk_frame=?", + FRAME_DETAIL_MAPPER, frame.getFrameId()); + } + + @Override + public FrameDetail getFrameDetail(String id) { + return getJdbcTemplate().queryForObject(GET_FRAME_DETAIL + " AND pk_frame=?", + FRAME_DETAIL_MAPPER, id); + } + + @Override + public FrameDetail findFrameDetail(JobInterface job, String name) { + // Uses C_FRAME_STR_NAME_UNQ + return getJdbcTemplate().queryForObject( + GET_FRAME_DETAIL + " AND frame.str_name=? 
AND frame.pk_job=?", FRAME_DETAIL_MAPPER, name, + job.getJobId()); + } + + @Override + public List findFrameDetails(FrameSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_FRAME_DETAIL), FRAME_DETAIL_MAPPER, + r.getValuesArray()); + } + + @Override + public List findFrames(FrameSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_MINIMAL_FRAME), FRAME_MAPPER, + r.getValuesArray()); + } + + private static final String FIND_LONGEST_FRAME = "SELECT " + "pk_frame " + "FROM " + "frame, " + + "layer " + "WHERE " + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job = ? " + + "AND " + "str_state=? " + "AND " + "layer.str_type=? " + "ORDER BY " + + "ts_stopped - ts_started DESC " + "LIMIT 1"; + + @Override + public FrameDetail findLongestFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_LONGEST_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); + return getFrameDetail(pk_frame); + } + + private static final String FIND_SHORTEST_FRAME = "SELECT " + "pk_frame " + "FROM " + "frame, " + + "layer " + "WHERE " + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job = ? " + + "AND " + "frame.str_state = ? " + "AND " + "layer.str_type = ? " + "ORDER BY " + + "ts_stopped - ts_started ASC " + "LIMIT 1"; + + @Override + public FrameDetail findShortestFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_SHORTEST_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); + return getFrameDetail(pk_frame); + } + + @Override + public FrameInterface getFrame(String id) { + return getJdbcTemplate().queryForObject(GET_MINIMAL_FRAME + " AND frame.pk_frame=?", + FRAME_MAPPER, id); + } + + @Override + public FrameInterface findFrame(JobInterface job, String name) { + // Uses C_FRAME_STR_NAME_UNQ + return getJdbcTemplate().queryForObject( + GET_MINIMAL_FRAME + " AND frame.str_name=? AND frame.pk_job=?", FRAME_MAPPER, name, + job.getJobId()); + } + + @Override + public void checkRetries(FrameInterface frame) { + int max_retries = getJdbcTemplate().queryForObject( + "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, frame.getJobId()); + + if (getJdbcTemplate().queryForObject("SELECT int_retries FROM frame WHERE pk_frame=?", + Integer.class, frame.getFrameId()) >= max_retries) { + getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_frame=?", + FrameState.DEAD.toString(), frame.getFrameId()); + } + } + + private static final String UPDATE_FRAME_STATE = "UPDATE " + "frame " + "SET " + "str_state = ?, " + + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " + + "pk_frame = ? " + "AND " + "int_version = ? "; + + @Override + public boolean updateFrameState(FrameInterface frame, FrameState state) { + if (getJdbcTemplate().update(UPDATE_FRAME_STATE, state.toString(), frame.getFrameId(), + frame.getVersion()) == 1) { + logger.info("The frame " + frame + " state changed to " + state.toString()); + return true; + } + logger.info("Failed to change the frame " + frame + " state to " + state.toString()); + return false; + } + + private static final String MARK_AS_WAITING = "UPDATE " + "frame " + "SET " + "str_state=?, " + + "ts_updated = current_timestamp, " + "ts_llu = current_timestamp, " + + "int_depend_count = 0, " + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " + + "AND " + "int_version = ? " + "AND " + "str_state = ? 
"; + + @Override + public void markFrameAsWaiting(FrameInterface frame) { + getJdbcTemplate().update(MARK_AS_WAITING, FrameState.WAITING.toString(), frame.getFrameId(), + frame.getVersion(), FrameState.DEPEND.toString()); + } + + private static final String MARK_AS_DEPEND = + "UPDATE " + "frame " + "SET " + "str_state=?, " + "int_depend_count = ?, " + + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " + + "pk_frame = ? " + "AND " + "int_version = ? " + "AND " + "str_state = ? "; + + private static final String GET_FRAME_DEPEND_COUNT = "SELECT " + "COUNT(1) " + "FROM " + "depend " + + "WHERE " + "( " + "(pk_job_depend_er = ? AND str_type LIKE 'JOB#_ON%' ESCAPE '#') " + "OR " + + "pk_layer_depend_er = ? " + "OR " + "pk_frame_depend_er = ? " + ") " + "AND " + + "depend.b_active = true " + "AND " + "depend.b_composite = false "; + + public void markFrameAsDepend(FrameInterface frame) { + // We need to full depend count in this case to reset the + // frames's depend count accurately. + int depend_count = getJdbcTemplate().queryForObject(GET_FRAME_DEPEND_COUNT, Integer.class, + frame.getJobId(), frame.getLayerId(), frame.getFrameId()); + + if (depend_count > 0) { + getJdbcTemplate().update(MARK_AS_DEPEND, FrameState.DEPEND.toString(), depend_count, + frame.getFrameId(), frame.getVersion(), FrameState.WAITING.toString()); + } + } + + private static final String FIND_HIGHEST_MEM_FRAME = + "SELECT " + "pk_frame " + "FROM " + "frame " + "WHERE " + "pk_job = ? " + "AND " + + "str_state = ? " + "ORDER BY " + "int_mem_max_used DESC " + "LIMIT 1"; + + @Override + public FrameDetail findHighestMemoryFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_HIGHEST_MEM_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString()); + return getFrameDetail(pk_frame); + } + + private static final String FIND_LOWEST_MEM_FRAME = + "SELECT " + "pk_frame " + "FROM " + "frame " + "WHERE " + "pk_job = ? " + "AND " + + "str_state = ? " + "ORDER BY " + "int_mem_max_used ASC " + "LIMIT 1"; + + @Override + public FrameDetail findLowestMemoryFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_LOWEST_MEM_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString()); + return getFrameDetail(pk_frame); + } + + @Override + public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet) { + int start; + int size = frameSet.size(); + int min = getJdbcTemplate().queryForObject( + "SELECT MIN(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, + layer.getLayerId()); + + start = min - size; + for (int frameIdx = 0; frameIdx < size; frameIdx++) { + getJdbcTemplate().update( + "UPDATE frame SET int_dispatch_order=? WHERE str_name=? 
AND pk_job=?", start, + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)), layer.getJobId()); + + logger.info( + "reordering " + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)) + " to " + start); + start++; + } + } + + @Override + public void reorderFramesLast(LayerInterface layer, FrameSet frameSet) { + int start; + int size = frameSet.size(); + List frames = new ArrayList<>(size); + int max = getJdbcTemplate().queryForObject( + "SELECT MAX(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, + layer.getLayerId()); + + start = max + 1; + for (int i = 0; i <= size; i++) { + frames.add(new Object[] {start + i, CueUtil.buildFrameName(layer, i), layer.getJobId()}); + } + + if (frames.size() > 0) { + getJdbcTemplate().batchUpdate( + "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", frames); + } + } + + @Override + public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet) { + + int size = frameSet.size(); + List frames = new ArrayList<>(size); + + for (int i = 0; i < size; i++) { + if (i >= size - i - 1) { + break; + } + try { + int a = getJdbcTemplate().queryForObject( + "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", + Integer.class, CueUtil.buildFrameName(layer, frameSet.get(i)), layer.getJobId(), + layer.getLayerId()); + + int b = getJdbcTemplate().queryForObject( + "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", + Integer.class, CueUtil.buildFrameName(layer, frameSet.get(size - i - 1)), + layer.getJobId(), layer.getLayerId()); + + frames.add(new Object[] {a, layer.getLayerId(), + CueUtil.buildFrameName(layer, frameSet.get(size - i - 1))}); + frames.add( + new Object[] {b, layer.getLayerId(), CueUtil.buildFrameName(layer, frameSet.get(i))}); + + } catch (Exception e) { + logger.info("frame not found while attempting to reverse layer, skipping"); + } + } + + if (frames.size() > 0) { + getJdbcTemplate().batchUpdate( + "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", frames); + } + } + + @Override + public void staggerLayer(LayerInterface layer, String frameRange, int stagger) { + + /* + * If the layer is only 1 frame we don't stagger it. + */ + if (getJdbcTemplate().queryForObject("SELECT int_total_count FROM layer_stat WHERE pk_layer=?", + Integer.class, layer.getLayerId()) == 1) { + return; } - @Override - public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet) { - int start; - int size = frameSet.size(); - int min = getJdbcTemplate().queryForObject( - "SELECT MIN(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, - layer.getLayerId()); - - start = min - size; - for (int frameIdx=0; frameIdx < size; frameIdx++) { - getJdbcTemplate().update( - "UPDATE frame SET int_dispatch_order=? WHERE str_name=? 
AND pk_job=?", - start, CueUtil.buildFrameName(layer, frameSet.get(frameIdx)), layer.getJobId()); - - logger.info("reordering " + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)) + " to " + - start); - start++; - } - } + logger.info("staggering: " + layer.getName() + " range: " + frameRange + " on " + stagger); - @Override - public void reorderFramesLast(LayerInterface layer, FrameSet frameSet) { - int start; - int size = frameSet.size(); - List frames = new ArrayList<>(size); - int max = getJdbcTemplate().queryForObject( - "SELECT MAX(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, - layer.getLayerId()); - - start = max + 1; - for (int i=0; i <= size; i++) { - frames.add(new Object[] { start + i, CueUtil.buildFrameName(layer, i), layer.getJobId() }); - } + FrameSet frameSet = null; + FrameSet range = null; - if (frames.size() > 0) { - getJdbcTemplate().batchUpdate( - "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", frames); - } + try { + frameSet = new FrameSet(frameRange + ":" + stagger); + range = new FrameSet(frameRange); + } catch (Exception e) { + logger.warn("failed to stagger layer: " + layer.getName() + ", " + e); + return; } - @Override - public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet) { - - int size = frameSet.size(); - List frames = new ArrayList<>(size); - - for (int i=0; i< size; i++) { - if (i >= size - i -1) { break; } - try { - int a = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", Integer.class, - CueUtil.buildFrameName(layer,frameSet.get(i)), layer.getJobId(), layer.getLayerId()); - - int b = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", Integer.class, - CueUtil.buildFrameName(layer,frameSet.get(size-i-1)), layer.getJobId(), layer.getLayerId()); + /* + * Find the dispatch order of the first frame we're working with and base our other staggers of + * this value. + */ + int first = getJdbcTemplate().queryForObject( + "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", + Integer.class, CueUtil.buildFrameName(layer, range.get(0)), layer.getJobId(), + layer.getLayerId()); - frames.add(new Object[] { a, layer.getLayerId(), CueUtil.buildFrameName(layer,frameSet.get(size-i-1)) }); - frames.add(new Object[] { b, layer.getLayerId(), CueUtil.buildFrameName(layer,frameSet.get(i)) }); + int size = range.size(); + for (int i = 0; i < size; i++) { + int frame = range.get(i); + int newDispatchOrder = frameSet.index(frame) + first; - } catch (Exception e) { - logger.info("frame not found while attempting to reverse layer, skipping"); - } - } - - if (frames.size() > 0) { - getJdbcTemplate().batchUpdate( - "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", frames); - } + getJdbcTemplate().update( + "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", newDispatchOrder, + layer.getLayerId(), CueUtil.buildFrameName(layer, frame)); } + } - @Override - public void staggerLayer(LayerInterface layer, String frameRange, int stagger) { - - /* - * If the layer is only 1 frame we don't stagger it. 
- */ - if (getJdbcTemplate().queryForObject( - "SELECT int_total_count FROM layer_stat WHERE pk_layer=?", Integer.class, - layer.getLayerId()) == 1) { - return; - } - - logger.info("staggering: " + layer.getName() + " range: " + frameRange - + " on " + stagger); + @Override + public boolean isFrameComplete(FrameInterface f) { - FrameSet frameSet = null; - FrameSet range = null; - - try { - frameSet = new FrameSet(frameRange + ":" + stagger); - range = new FrameSet(frameRange); - } catch (Exception e) { - logger.warn("failed to stagger layer: " + layer.getName() + ", " + e); - return; - } + String state = getJdbcTemplate().queryForObject("SELECT str_state FROM frame WHERE pk_frame=?", + String.class, f.getFrameId()); - /* - * Find the dispatch order of the first frame we're working with and base - * our other staggers of this value. - */ - int first = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", Integer.class, - CueUtil.buildFrameName(layer, range.get(0)), layer.getJobId(), layer.getLayerId()); - - int size = range.size(); - for (int i=0; i < size; i++) { - int frame = range.get(i); - int newDispatchOrder = frameSet.index(frame) + first; - - getJdbcTemplate().update( - "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", - newDispatchOrder, layer.getLayerId(), CueUtil.buildFrameName(layer, frame)); - } + if (state.equals(FrameState.SUCCEEDED.toString()) + || state.equals(FrameState.EATEN.toString())) { + return true; } - @Override - public boolean isFrameComplete(FrameInterface f) { - - String state = getJdbcTemplate().queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, f.getFrameId()); + return false; + } - if (state.equals(FrameState.SUCCEEDED.toString()) || - state.equals(FrameState.EATEN.toString())) { - return true; + private static final RowMapper RESOURCE_USAGE_MAPPER = + new RowMapper() { + public ResourceUsage mapRow(ResultSet rs, int rowNum) throws SQLException { + return new ResourceUsage(rs.getLong("int_clock_time"), rs.getInt("int_cores"), + rs.getInt("int_gpus")); } - return false; - } + }; - private static final - RowMapper RESOURCE_USAGE_MAPPER = - new RowMapper() { - public ResourceUsage mapRow(ResultSet rs, - int rowNum) throws SQLException { - return new ResourceUsage( - rs.getLong("int_clock_time"), - rs.getInt("int_cores"), - rs.getInt("int_gpus")); - } - - }; - - @Override - public ResourceUsage getResourceUsage(FrameInterface f) { - /* - * Using current_timestamp = ts_started here because ts_stopped is not set. - * Stopping the frame allows it to be dispatched again, which could - * blow away the ts_stopped time. - */ - return getJdbcTemplate().queryForObject( - "SELECT " + - "COALESCE(interval_to_seconds(current_timestamp - ts_started), 1) " + - "AS int_clock_time, " + - "COALESCE(int_cores, 100) AS int_cores," + - "int_gpus " + - "FROM " + - "frame " + - "WHERE " + - "pk_frame = ?", RESOURCE_USAGE_MAPPER, f.getFrameId()); - } - - private static final String UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME = - "UPDATE " + - "frame " + - "SET " + - "ts_updated = current_timestamp," + - "int_mem_max_used = ?," + - "int_mem_used = ?," + - "ts_llu = ? " + - "WHERE " + - "pk_frame = ? 
"; - - @Override - public void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, - long lluTime) { - getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME, - maxRss, rss, new Timestamp(lluTime * 1000l), f.getFrameId()); - } - - /** - * Attempt a SELECT FOR UPDATE NOWAIT on the frame record. If - * the frame is being modified by another transaction or if - * the version has been incremented a FrameReservationException - * is thrown. - * - * @param frame - * @param state + @Override + public ResourceUsage getResourceUsage(FrameInterface f) { + /* + * Using current_timestamp = ts_started here because ts_stopped is not set. Stopping the frame + * allows it to be dispatched again, which could blow away the ts_stopped time. */ - @Override - public void lockFrameForUpdate(FrameInterface frame, FrameState state) { - try { - getJdbcTemplate().queryForObject( - "SELECT pk_frame FROM frame WHERE pk_frame=? AND " + - "str_state=? AND int_version =? FOR UPDATE NOWAIT", - String.class, frame.getFrameId(), - state.toString(), frame.getVersion()); - } catch (Exception e) { - String error_msg = "the frame " + - frame + " was updated by another thread."; - throw new FrameReservationException(error_msg, e); - } - } - - @Override - public boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state) { - - logger.info("Setting checkpoint state to: " + state.toString()); - - boolean result = false; - - if (state.equals(CheckpointState.COMPLETE)) { - /* - * Only update the checkpoint state to complete if the state - * is either Copying or Enabled. - */ - result = getJdbcTemplate().update( - "UPDATE frame SET str_checkpoint_state=?, " + - "int_checkpoint_count=int_checkpoint_count + 1 WHERE " + - "pk_frame=? AND str_checkpoint_state IN (?, ?)", - CheckpointState.COMPLETE.toString(), - frame.getFrameId(), - CheckpointState.COPYING.toString(), - CheckpointState.ENABLED.toString()) == 1; - } - else { - result = getJdbcTemplate().update( - "UPDATE frame SET str_checkpoint_state=? WHERE pk_frame=?", - state.toString(), frame.getFrameId()) == 1; - } - - /* - * If the checkpoint state is complete or disabled then set the frame - * state back to waiting, if and only if the frame state is currently - * in the checkpoint state. - */ - if ((state.equals(CheckpointState.DISABLED)) || - state.equals(CheckpointState.COMPLETE) && result) { - getJdbcTemplate().update( - "UPDATE frame SET str_state=? WHERE pk_frame=? AND str_state=?", - FrameState.WAITING.toString(), frame.getFrameId(), - FrameState.CHECKPOINT.toString()); + return getJdbcTemplate().queryForObject( + "SELECT " + "COALESCE(interval_to_seconds(current_timestamp - ts_started), 1) " + + "AS int_clock_time, " + "COALESCE(int_cores, 100) AS int_cores," + "int_gpus " + + "FROM " + "frame " + "WHERE " + "pk_frame = ?", + RESOURCE_USAGE_MAPPER, f.getFrameId()); + } + + private static final String UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME = + "UPDATE " + "frame " + "SET " + "ts_updated = current_timestamp," + "int_mem_max_used = ?," + + "int_mem_used = ?," + "ts_llu = ? " + "WHERE " + "pk_frame = ? "; + + @Override + public void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, + long lluTime) { + getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME, maxRss, rss, + new Timestamp(lluTime * 1000l), f.getFrameId()); + } + + /** + * Attempt a SELECT FOR UPDATE NOWAIT on the frame record. 
If the frame is being modified by + * another transaction or if the version has been incremented a FrameReservationException is + * thrown. + * + * @param frame + * @param state + */ + @Override + public void lockFrameForUpdate(FrameInterface frame, FrameState state) { + try { + getJdbcTemplate().queryForObject( + "SELECT pk_frame FROM frame WHERE pk_frame=? AND " + + "str_state=? AND int_version =? FOR UPDATE NOWAIT", + String.class, frame.getFrameId(), state.toString(), frame.getVersion()); + } catch (Exception e) { + String error_msg = "the frame " + frame + " was updated by another thread."; + throw new FrameReservationException(error_msg, e); + } + } + + @Override + public boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state) { + + logger.info("Setting checkpoint state to: " + state.toString()); + + boolean result = false; + + if (state.equals(CheckpointState.COMPLETE)) { + /* + * Only update the checkpoint state to complete if the state is either Copying or Enabled. + */ + result = getJdbcTemplate().update( + "UPDATE frame SET str_checkpoint_state=?, " + + "int_checkpoint_count=int_checkpoint_count + 1 WHERE " + + "pk_frame=? AND str_checkpoint_state IN (?, ?)", + CheckpointState.COMPLETE.toString(), frame.getFrameId(), + CheckpointState.COPYING.toString(), CheckpointState.ENABLED.toString()) == 1; + } else { + result = getJdbcTemplate().update("UPDATE frame SET str_checkpoint_state=? WHERE pk_frame=?", + state.toString(), frame.getFrameId()) == 1; + } + + /* + * If the checkpoint state is complete or disabled then set the frame state back to waiting, if + * and only if the frame state is currently in the checkpoint state. + */ + if ((state.equals(CheckpointState.DISABLED)) + || state.equals(CheckpointState.COMPLETE) && result) { + getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_frame=? AND str_state=?", + FrameState.WAITING.toString(), frame.getFrameId(), FrameState.CHECKPOINT.toString()); + } + + return result; + } + + @Override + public List getStaleCheckpoints(int cutoffTimeSec) { + return getJdbcTemplate().query( + GET_MINIMAL_FRAME + " AND job.str_state=? " + " AND frame.str_state=? 
" + + " AND current_timestamp - frame.ts_stopped > interval '" + cutoffTimeSec + "' second", + FRAME_MAPPER, JobState.PENDING.toString(), FrameState.CHECKPOINT.toString()); + } + + private static final String CREATE_FRAME_STATE_OVERRIDE = + "INSERT INTO frame_state_display_overrides (" + "pk_frame_override," + "pk_frame," + + "str_frame_state," + "str_override_text," + "str_rgb" + ") " + "VALUES (?,?,?,?,?)"; + + @Override + public void setFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override) { + getJdbcTemplate().update(CREATE_FRAME_STATE_OVERRIDE, SqlUtil.genKeyRandom(), frameId, + override.getState().toString(), override.getText(), + Integer.toString(override.getColor().getRed()) + "," + + Integer.toString(override.getColor().getGreen()) + "," + + Integer.toString(override.getColor().getBlue())); + } + + private static final String GET_FRAME_STATE_OVERRIDE = + "SELECT * from frame_state_display_overrides WHERE pk_frame = ?"; + + private static final RowMapper OVERRIDE_MAPPER = + new RowMapper() { + public FrameStateDisplayOverride mapRow(ResultSet rs, int rowNum) throws SQLException { + String[] rgb = rs.getString("str_rgb").split(","); + return FrameStateDisplayOverride.newBuilder() + .setState(FrameState.valueOf(rs.getString("str_frame_state"))) + .setText(rs.getString("str_override_text")) + .setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(Integer.parseInt(rgb[0])) + .setGreen(Integer.parseInt(rgb[1])).setBlue(Integer.parseInt(rgb[2])).build()) + .build(); } - - return result; - } - - @Override - public List getStaleCheckpoints(int cutoffTimeSec) { - return getJdbcTemplate().query( - GET_MINIMAL_FRAME + - " AND job.str_state=? " + - " AND frame.str_state=? " + - " AND current_timestamp - frame.ts_stopped > interval '" + cutoffTimeSec + "' second", - FRAME_MAPPER, - JobState.PENDING.toString(), - FrameState.CHECKPOINT.toString()); - } - - private static final String CREATE_FRAME_STATE_OVERRIDE = - "INSERT INTO frame_state_display_overrides (" + - "pk_frame_override," + - "pk_frame," + - "str_frame_state," + - "str_override_text," + - "str_rgb" + - ") " + - "VALUES (?,?,?,?,?)"; - - @Override - public void setFrameStateDisplayOverride(String frameId, - FrameStateDisplayOverride override) { - getJdbcTemplate().update(CREATE_FRAME_STATE_OVERRIDE, - SqlUtil.genKeyRandom(), - frameId, - override.getState().toString(), - override.getText(), - Integer.toString(override.getColor().getRed()) + "," - + Integer.toString(override.getColor().getGreen()) + "," - + Integer.toString(override.getColor().getBlue()) - ); - } - - private static final String GET_FRAME_STATE_OVERRIDE = - "SELECT * from frame_state_display_overrides WHERE pk_frame = ?"; - - private static final RowMapper OVERRIDE_MAPPER = - new RowMapper() { - public FrameStateDisplayOverride mapRow(ResultSet rs, - int rowNum) throws SQLException { - String[] rgb = rs.getString("str_rgb").split(","); - return FrameStateDisplayOverride.newBuilder() - .setState(FrameState.valueOf(rs.getString("str_frame_state"))) - .setText(rs.getString("str_override_text")) - .setColor(FrameStateDisplayOverride.RGB.newBuilder() - .setRed(Integer.parseInt(rgb[0])) - .setGreen(Integer.parseInt(rgb[1])) - .setBlue(Integer.parseInt(rgb[2])) - .build()) - .build(); - } - }; - - @Override - public FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId) { - List overrides = getJdbcTemplate().query( - GET_FRAME_STATE_OVERRIDE, OVERRIDE_MAPPER, frameId); - return FrameStateDisplayOverrideSeq.newBuilder() - 
.addAllOverrides(overrides) - .build(); - } - - private static final String UPDATE_FRAME_STATE_OVERRIDE = - "UPDATE " + - "frame_state_display_overrides " + - "SET " + - "str_override_text = ?," + - "str_rgb = ? " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "str_frame_state = ?"; - - @Override - public void updateFrameStateDisplayOverride(String frameId, - FrameStateDisplayOverride override) { - getJdbcTemplate().update(UPDATE_FRAME_STATE_OVERRIDE, - override.getText(), - Integer.toString(override.getColor().getRed()) + "," - + Integer.toString(override.getColor().getGreen()) + "," - + Integer.toString(override.getColor().getBlue()), - frameId, - override.getState().toString()); - } -} \ No newline at end of file + }; + + @Override + public FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId) { + List overrides = + getJdbcTemplate().query(GET_FRAME_STATE_OVERRIDE, OVERRIDE_MAPPER, frameId); + return FrameStateDisplayOverrideSeq.newBuilder().addAllOverrides(overrides).build(); + } + + private static final String UPDATE_FRAME_STATE_OVERRIDE = + "UPDATE " + "frame_state_display_overrides " + "SET " + "str_override_text = ?," + + "str_rgb = ? " + "WHERE " + "pk_frame = ? " + "AND " + "str_frame_state = ?"; + + @Override + public void updateFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override) { + getJdbcTemplate().update(UPDATE_FRAME_STATE_OVERRIDE, override.getText(), + Integer.toString(override.getColor().getRed()) + "," + + Integer.toString(override.getColor().getGreen()) + "," + + Integer.toString(override.getColor().getBlue()), + frameId, override.getState().toString()); + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java index 948020f9f..7b40cb7a7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.CallableStatement; @@ -46,432 +42,370 @@ public class GroupDaoJdbc extends JdbcDaoSupport implements GroupDao { - private static final int MAX_NESTING_LEVEL = 10; - - @Override - public String getRootGroupId(ShowInterface show) { - return getJdbcTemplate().queryForObject( - "SELECT pk_folder FROM folder WHERE pk_show=? AND pk_parent_folder IS NULL", - String.class, show.getShowId()); - } + private static final int MAX_NESTING_LEVEL = 10; - @Override - public void deleteGroup(GroupInterface group) { + @Override + public String getRootGroupId(ShowInterface show) { + return getJdbcTemplate().queryForObject( + "SELECT pk_folder FROM folder WHERE pk_show=? AND pk_parent_folder IS NULL", String.class, + show.getShowId()); + } - if (childGroupCount(group) > 0) { - throw new EntityRemovalError("failed to delete group " + group.getName() + - ", still has sub groups"); - } - - if (childJobCount(group) > 0) { - throw new EntityRemovalError("failed to delete group " + group.getName() + - ", still has sub jobs"); - } + @Override + public void deleteGroup(GroupInterface group) { - // reparent all jobs to root group - getJdbcTemplate().update( - "UPDATE job SET pk_folder=? WHERE pk_folder=?", - getRootGroupId(group), group.getId()); - - getJdbcTemplate().update( - "DELETE FROM folder WHERE pk_parent_folder IS NOT NULL AND pk_folder=?", group.getId()); + if (childGroupCount(group) > 0) { + throw new EntityRemovalError( + "failed to delete group " + group.getName() + ", still has sub groups"); } - public static final String INSERT_GROUP = - "INSERT INTO " + - "folder " + - "( " + - "pk_folder," + - "pk_parent_folder,"+ - "pk_show, " + - "pk_dept,"+ - "str_name " + - ") " + - "VALUES (?,?,?,?,?)"; - - @Override - public void insertGroup(GroupDetail group) { - group.id = SqlUtil.genKeyRandom(); - String parentId = group.parentId; - try { - getJdbcTemplate().update(INSERT_GROUP, - group.id, parentId, group.showId, group.deptId, group.name); - } catch (Exception e) { - throw new EntityCreationError("error creating group, " + e); - } + if (childJobCount(group) > 0) { + throw new EntityRemovalError( + "failed to delete group " + group.getName() + ", still has sub jobs"); } - @Override - public void insertGroup(GroupDetail group, GroupInterface parent) { - if (parent != null) { - group.parentId = parent.getGroupId(); - } - insertGroup(group); + // reparent all jobs to root group + getJdbcTemplate().update("UPDATE job SET pk_folder=? 
WHERE pk_folder=?", getRootGroupId(group), + group.getId()); + + getJdbcTemplate().update( + "DELETE FROM folder WHERE pk_parent_folder IS NOT NULL AND pk_folder=?", group.getId()); + } + + public static final String INSERT_GROUP = "INSERT INTO " + "folder " + "( " + "pk_folder," + + "pk_parent_folder," + "pk_show, " + "pk_dept," + "str_name " + ") " + "VALUES (?,?,?,?,?)"; + + @Override + public void insertGroup(GroupDetail group) { + group.id = SqlUtil.genKeyRandom(); + String parentId = group.parentId; + try { + getJdbcTemplate().update(INSERT_GROUP, group.id, parentId, group.showId, group.deptId, + group.name); + } catch (Exception e) { + throw new EntityCreationError("error creating group, " + e); } + } - @Override - public void updateGroupParent(GroupInterface group, GroupInterface dest) { - - if (group.getGroupId().equals(dest.getGroupId())) { - throw new EntityModificationError("error moving group, " + - "cannot move group into itself"); - } - - if (!group.getShowId().equals(dest.getShowId())) { - throw new EntityModificationError("error moving group, " + - "cannot move groups between shows"); - } - - int recurse = 0; - String destParent = dest.getGroupId(); - while (true) { - destParent = getJdbcTemplate().queryForObject( - "SELECT pk_parent_folder FROM folder WHERE pk_folder=?", - String.class, destParent); - if (destParent == null) { break; } - if (destParent.equals(group.getGroupId())) { - throw new EntityModificationError("error moving group, you cannot move a group " + - "into one of its sub groups"); - } - recurse++; - if (recurse > MAX_NESTING_LEVEL) { - throw new EntityModificationError("error moving group, cannot tell " + - "if your moving a group into one of its sub groups"); - } - } - - int result = getJdbcTemplate().update( - "UPDATE folder SET pk_parent_folder=? WHERE pk_folder=? AND pk_parent_folder IS NOT NULL", - dest.getId(), group.getId()); - - recurseParentChange(group.getId(), dest.getId()); - if (result == 0) { - throw new EntityModificationError("error moving group, " - + group.getName() + ", the group does not exist or its the top level group"); - } + @Override + public void insertGroup(GroupDetail group, GroupInterface parent) { + if (parent != null) { + group.parentId = parent.getGroupId(); } + insertGroup(group); + } - @Override - public void updateName(GroupInterface group, String value) { - getJdbcTemplate().update( - "UPDATE folder SET str_name=? WHERE pk_folder=?", - value, group.getId()); - } + @Override + public void updateGroupParent(GroupInterface group, GroupInterface dest) { - @Override - public void updateDepartment(GroupInterface group, DepartmentInterface dept) { - getJdbcTemplate().update( - "UPDATE folder SET pk_dept=? WHERE pk_folder=?", - dept.getDepartmentId(), group.getId()); + if (group.getGroupId().equals(dest.getGroupId())) { + throw new EntityModificationError("error moving group, " + "cannot move group into itself"); } - @Override - public void updateDefaultJobMaxCores(GroupInterface group, int value) { - if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The default max cores for a job must " + - "be greater than a single core"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE folder SET int_job_max_cores=? 
WHERE pk_folder=?", - value, group.getId()); + if (!group.getShowId().equals(dest.getShowId())) { + throw new EntityModificationError( + "error moving group, " + "cannot move groups between shows"); } - @Override - public void updateDefaultJobMinCores(GroupInterface group, int value) { - if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The default min cores for a job must " + - "be greater than a single core"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE folder SET int_job_min_cores=? WHERE pk_folder=?", - value, group.getId()); + int recurse = 0; + String destParent = dest.getGroupId(); + while (true) { + destParent = getJdbcTemplate().queryForObject( + "SELECT pk_parent_folder FROM folder WHERE pk_folder=?", String.class, destParent); + if (destParent == null) { + break; + } + if (destParent.equals(group.getGroupId())) { + throw new EntityModificationError( + "error moving group, you cannot move a group " + "into one of its sub groups"); + } + recurse++; + if (recurse > MAX_NESTING_LEVEL) { + throw new EntityModificationError("error moving group, cannot tell " + + "if your moving a group into one of its sub groups"); + } } - @Override - public void updateMaxCores(GroupInterface group, int value) { - if (value < 0) { value = CueUtil.FEATURE_DISABLED; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The group max cores feature must " + - "be a whole core or greater, pass in: " + value; - throw new IllegalArgumentException(msg); - } + int result = getJdbcTemplate().update( + "UPDATE folder SET pk_parent_folder=? WHERE pk_folder=? AND pk_parent_folder IS NOT NULL", + dest.getId(), group.getId()); - getJdbcTemplate().update( - "UPDATE folder_resource SET int_max_cores=? WHERE pk_folder=?", - value, group.getId()); + recurseParentChange(group.getId(), dest.getId()); + if (result == 0) { + throw new EntityModificationError("error moving group, " + group.getName() + + ", the group does not exist or its the top level group"); } - - @Override - public void updateMinCores(GroupInterface group, int value) { - if (value < 0) { value = 0; } - getJdbcTemplate().update( - "UPDATE folder_resource SET int_min_cores=? WHERE pk_folder=?", - value, group.getId()); + } + + @Override + public void updateName(GroupInterface group, String value) { + getJdbcTemplate().update("UPDATE folder SET str_name=? WHERE pk_folder=?", value, + group.getId()); + } + + @Override + public void updateDepartment(GroupInterface group, DepartmentInterface dept) { + getJdbcTemplate().update("UPDATE folder SET pk_dept=? 
WHERE pk_folder=?", + dept.getDepartmentId(), group.getId()); + } + + @Override + public void updateDefaultJobMaxCores(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; } - - private static final String IS_OVER_MIN_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job,"+ - "folder_resource fr "+ - "WHERE " + - "job.pk_folder = fr.pk_folder " + - "AND " + - "fr.int_cores > fr.int_min_cores " + - "AND "+ - "job.pk_job = ?"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, - Integer.class, job.getJobId()) > 0; + if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { + String msg = "The default max cores for a job must " + "be greater than a single core"; + throw new IllegalArgumentException(msg); } - - @Override - public void updateDefaultJobMaxGpus(GroupInterface group, int value) { - if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } - getJdbcTemplate().update( - "UPDATE folder SET int_job_max_gpus=? WHERE pk_folder=?", - value, group.getId()); + getJdbcTemplate().update("UPDATE folder SET int_job_max_cores=? WHERE pk_folder=?", value, + group.getId()); + } + + @Override + public void updateDefaultJobMinCores(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; } - - @Override - public void updateDefaultJobMinGpus(GroupInterface group, int value) { - if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } - getJdbcTemplate().update( - "UPDATE folder SET int_job_min_gpus=? WHERE pk_folder=?", - value, group.getId()); + if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { + String msg = "The default min cores for a job must " + "be greater than a single core"; + throw new IllegalArgumentException(msg); } - - @Override - public void updateMaxGpus(GroupInterface group, int value) { - if (value < 0) { value = CueUtil.FEATURE_DISABLED; } - - getJdbcTemplate().update( - "UPDATE folder_resource SET int_max_gpus=? WHERE pk_folder=?", - value, group.getId()); + getJdbcTemplate().update("UPDATE folder SET int_job_min_cores=? WHERE pk_folder=?", value, + group.getId()); + } + + @Override + public void updateMaxCores(GroupInterface group, int value) { + if (value < 0) { + value = CueUtil.FEATURE_DISABLED; } - - @Override - public void updateMinGpus(GroupInterface group, int value) { - if (value < 0) { value = 0; } - getJdbcTemplate().update( - "UPDATE folder_resource SET int_min_gpus=? WHERE pk_folder=?", - value, group.getId()); + if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { + String msg = + "The group max cores feature must " + "be a whole core or greater, pass in: " + value; + throw new IllegalArgumentException(msg); } - @Override - public void updateDefaultJobPriority(GroupInterface group, int value) { - if (value < 0) { value = CueUtil.FEATURE_DISABLED; } - getJdbcTemplate().update( - "UPDATE folder SET int_job_priority=? WHERE pk_folder=?", - value, group.getId()); - if (value != CueUtil.FEATURE_DISABLED) { - getJdbcTemplate().update( - "UPDATE job_resource SET int_priority=? WHERE pk_job IN (" + - "SELECT pk_job from job WHERE pk_folder=?)", - value, group.getId()); - } - } + getJdbcTemplate().update("UPDATE folder_resource SET int_max_cores=? 
WHERE pk_folder=?", value, + group.getId()); + } - private static final String GET_GROUP_DETAIL = - "SELECT " + - "folder.pk_folder, " + - "folder.int_job_max_cores,"+ - "folder.int_job_min_cores,"+ - "folder.int_job_max_gpus,"+ - "folder.int_job_min_gpus,"+ - "folder.int_job_priority,"+ - "folder.str_name,"+ - "folder.pk_parent_folder,"+ - "folder.pk_show,"+ - "folder.pk_dept,"+ - "folder_level.int_level, " + - "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores," + - "folder_resource.int_min_gpus,"+ - "folder_resource.int_max_gpus " + - "FROM " + - "folder, "+ - "folder_level, " + - "folder_resource " + - "WHERE " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder"; - - private static final String GET_GROUP_DETAIL_BY_JOB = - "SELECT " + - "folder.pk_folder, " + - "folder.int_job_max_cores,"+ - "folder.int_job_min_cores,"+ - "folder.int_job_max_gpus,"+ - "folder.int_job_min_gpus,"+ - "folder.int_job_priority,"+ - "folder.str_name,"+ - "folder.pk_parent_folder,"+ - "folder.pk_show,"+ - "folder.pk_dept,"+ - "folder_level.int_level, " + - "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores," + - "folder_resource.int_min_gpus,"+ - "folder_resource.int_max_gpus " + - "FROM " + - "folder, "+ - "folder_level, " + - "folder_resource, " + - "job "+ - "WHERE " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "job.pk_job = ?"; - - @Override - public GroupDetail getGroupDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_GROUP_DETAIL + " AND folder.pk_folder=?", GROUP_DETAIL_MAPPER, id); + @Override + public void updateMinCores(GroupInterface group, int value) { + if (value < 0) { + value = 0; } - - @Override - public GroupDetail getGroupDetail(JobInterface job) { - return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL_BY_JOB, - GROUP_DETAIL_MAPPER, job.getId()); + getJdbcTemplate().update("UPDATE folder_resource SET int_min_cores=? WHERE pk_folder=?", value, + group.getId()); + } + + private static final String IS_OVER_MIN_CORES = "SELECT " + "COUNT(1) " + "FROM " + "job," + + "folder_resource fr " + "WHERE " + "job.pk_folder = fr.pk_folder " + "AND " + + "fr.int_cores > fr.int_min_cores " + "AND " + "job.pk_job = ?"; + + @Override + public boolean isOverMinCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, Integer.class, job.getJobId()) > 0; + } + + @Override + public void updateDefaultJobMaxGpus(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; } - - @Override - public GroupDetail getRootGroupDetail(ShowInterface show) { - return getJdbcTemplate().queryForObject( - GET_GROUP_DETAIL + " AND folder.pk_show=? AND pk_parent_folder IS NULL", - GROUP_DETAIL_MAPPER, show.getShowId()); + getJdbcTemplate().update("UPDATE folder SET int_job_max_gpus=? WHERE pk_folder=?", value, + group.getId()); + } + + @Override + public void updateDefaultJobMinGpus(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; } - - @Override - public GroupInterface getGroup(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_show, pk_folder,str_name FROM folder WHERE pk_folder=?", - GROUP_MAPPER, id); + getJdbcTemplate().update("UPDATE folder SET int_job_min_gpus=? 
WHERE pk_folder=?", value, + group.getId()); + } + + @Override + public void updateMaxGpus(GroupInterface group, int value) { + if (value < 0) { + value = CueUtil.FEATURE_DISABLED; } - @Override - public List getGroups(List idl) { - return getJdbcTemplate().query( - "SELECT pk_show, pk_folder, str_name FROM folder WHERE " + - SqlUtil.buildBindVariableArray("pk_folder", idl.size()), - GROUP_MAPPER, idl.toArray()); - } + getJdbcTemplate().update("UPDATE folder_resource SET int_max_gpus=? WHERE pk_folder=?", value, + group.getId()); + } - @Override - public List getChildrenRecursive(GroupInterface group) { - List groups = new ArrayList(32); - GroupInterface current = group; - for (GroupInterface g: getChildren(current)) { - current = g; - groups.add(current); - groups.addAll(getChildrenRecursive(current)); - } - return groups; + @Override + public void updateMinGpus(GroupInterface group, int value) { + if (value < 0) { + value = 0; } - - @Override - public List getChildren(GroupInterface group) { - return getJdbcTemplate().query( - "SELECT pk_show, pk_folder, str_name FROM folder WHERE pk_parent_folder = ?", - GROUP_MAPPER, group.getGroupId()); + getJdbcTemplate().update("UPDATE folder_resource SET int_min_gpus=? WHERE pk_folder=?", value, + group.getId()); + } + + @Override + public void updateDefaultJobPriority(GroupInterface group, int value) { + if (value < 0) { + value = CueUtil.FEATURE_DISABLED; } - - private static final String IS_MANAGED = - "SELECT " + - "COUNT(1) " + - "FROM " + - "folder, " + - "point " + - "WHERE " + - "folder.pk_show = point.pk_show " + - "AND " + - "folder.pk_dept = point.pk_dept " + - "AND " + - "folder.b_exclude_managed = false " + - "AND " + - "point.b_managed = true " + - "AND " + - "folder.pk_folder = ?"; - - @Override - public boolean isManaged(GroupInterface group) { - return getJdbcTemplate().queryForObject(IS_MANAGED, - Integer.class, group.getGroupId()) > 0; + getJdbcTemplate().update("UPDATE folder SET int_job_priority=? WHERE pk_folder=?", value, + group.getId()); + if (value != CueUtil.FEATURE_DISABLED) { + getJdbcTemplate().update("UPDATE job_resource SET int_priority=? 
WHERE pk_job IN (" + + "SELECT pk_job from job WHERE pk_folder=?)", value, group.getId()); } - - public static final RowMapper GROUP_MAPPER = - new RowMapper() { - public GroupInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new GroupInterface() { - String id = rs.getString("pk_folder"); - String show = rs.getString("pk_show"); - String name = rs.getString("str_name"); - public String getGroupId() { return id; } - public String getShowId() { return show; } - public String getId() { return id; } - public String getName() { return name; } - }; - } - }; - - public static final RowMapper GROUP_DETAIL_MAPPER = - new RowMapper() { - public GroupDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - GroupDetail group = new GroupDetail(); - group.id = rs.getString("pk_folder"); - group.jobMaxCores = rs.getInt("int_job_max_cores"); - group.jobMinCores = rs.getInt("int_job_min_cores"); - group.jobMaxGpus = rs.getInt("int_job_max_gpus"); - group.jobMinGpus = rs.getInt("int_job_min_gpus"); - group.jobPriority = rs.getInt("int_job_priority"); - group.minCores = rs.getInt("int_min_cores"); - group.maxCores = rs.getInt("int_max_cores"); - group.minGpus = rs.getInt("int_min_gpus"); - group.maxGpus = rs.getInt("int_max_gpus"); - group.name = rs.getString("str_name"); - group.parentId = rs.getString("pk_parent_folder"); - group.showId = rs.getString("pk_show"); - group.deptId = rs.getString("pk_dept"); - return group; + } + + private static final String GET_GROUP_DETAIL = + "SELECT " + "folder.pk_folder, " + "folder.int_job_max_cores," + "folder.int_job_min_cores," + + "folder.int_job_max_gpus," + "folder.int_job_min_gpus," + "folder.int_job_priority," + + "folder.str_name," + "folder.pk_parent_folder," + "folder.pk_show," + "folder.pk_dept," + + "folder_level.int_level, " + "folder_resource.int_min_cores," + + "folder_resource.int_max_cores," + "folder_resource.int_min_gpus," + + "folder_resource.int_max_gpus " + "FROM " + "folder, " + "folder_level, " + + "folder_resource " + "WHERE " + "folder.pk_folder = folder_level.pk_folder " + "AND " + + "folder.pk_folder = folder_resource.pk_folder"; + + private static final String GET_GROUP_DETAIL_BY_JOB = + "SELECT " + "folder.pk_folder, " + "folder.int_job_max_cores," + "folder.int_job_min_cores," + + "folder.int_job_max_gpus," + "folder.int_job_min_gpus," + "folder.int_job_priority," + + "folder.str_name," + "folder.pk_parent_folder," + "folder.pk_show," + "folder.pk_dept," + + "folder_level.int_level, " + "folder_resource.int_min_cores," + + "folder_resource.int_max_cores," + "folder_resource.int_min_gpus," + + "folder_resource.int_max_gpus " + "FROM " + "folder, " + "folder_level, " + + "folder_resource, " + "job " + "WHERE " + "folder.pk_folder = folder_level.pk_folder " + + "AND " + "folder.pk_folder = folder_resource.pk_folder " + "AND " + + "job.pk_folder = folder.pk_folder " + "AND " + "job.pk_job = ?"; + + @Override + public GroupDetail getGroupDetail(String id) { + return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL + " AND folder.pk_folder=?", + GROUP_DETAIL_MAPPER, id); + } + + @Override + public GroupDetail getGroupDetail(JobInterface job) { + return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL_BY_JOB, GROUP_DETAIL_MAPPER, + job.getId()); + } + + @Override + public GroupDetail getRootGroupDetail(ShowInterface show) { + return getJdbcTemplate().queryForObject( + GET_GROUP_DETAIL + " AND folder.pk_show=? 
AND pk_parent_folder IS NULL", + GROUP_DETAIL_MAPPER, show.getShowId()); + } + + @Override + public GroupInterface getGroup(String id) { + return getJdbcTemplate().queryForObject( + "SELECT pk_show, pk_folder,str_name FROM folder WHERE pk_folder=?", GROUP_MAPPER, id); + } + + @Override + public List getGroups(List idl) { + return getJdbcTemplate().query("SELECT pk_show, pk_folder, str_name FROM folder WHERE " + + SqlUtil.buildBindVariableArray("pk_folder", idl.size()), GROUP_MAPPER, idl.toArray()); + } + + @Override + public List getChildrenRecursive(GroupInterface group) { + List groups = new ArrayList(32); + GroupInterface current = group; + for (GroupInterface g : getChildren(current)) { + current = g; + groups.add(current); + groups.addAll(getChildrenRecursive(current)); + } + return groups; + } + + @Override + public List getChildren(GroupInterface group) { + return getJdbcTemplate().query( + "SELECT pk_show, pk_folder, str_name FROM folder WHERE pk_parent_folder = ?", GROUP_MAPPER, + group.getGroupId()); + } + + private static final String IS_MANAGED = "SELECT " + "COUNT(1) " + "FROM " + "folder, " + "point " + + "WHERE " + "folder.pk_show = point.pk_show " + "AND " + "folder.pk_dept = point.pk_dept " + + "AND " + "folder.b_exclude_managed = false " + "AND " + "point.b_managed = true " + "AND " + + "folder.pk_folder = ?"; + + @Override + public boolean isManaged(GroupInterface group) { + return getJdbcTemplate().queryForObject(IS_MANAGED, Integer.class, group.getGroupId()) > 0; + } + + public static final RowMapper GROUP_MAPPER = new RowMapper() { + public GroupInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { + return new GroupInterface() { + String id = rs.getString("pk_folder"); + String show = rs.getString("pk_show"); + String name = rs.getString("str_name"); + + public String getGroupId() { + return id; } - }; + public String getShowId() { + return show; + } - private int childGroupCount(GroupInterface group) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_parent_folder=?", - Integer.class, group.getId()); - } + public String getId() { + return id; + } - private int childJobCount(GroupInterface group) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(*) FROM job WHERE pk_folder=? AND str_state=?", - Integer.class, group.getId(), JobState.PENDING.toString()); + public String getName() { + return name; + } + }; } - - private void recurseParentChange(final String folderId, final String newParentId) { - getJdbcTemplate().call(new CallableStatementCreator() { - - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recurse_folder_parent_change(?,?) 
}"); - c.setString(1, folderId); - c.setString(2, newParentId); - return c; - } - }, new ArrayList()); + }; + + public static final RowMapper GROUP_DETAIL_MAPPER = new RowMapper() { + public GroupDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + GroupDetail group = new GroupDetail(); + group.id = rs.getString("pk_folder"); + group.jobMaxCores = rs.getInt("int_job_max_cores"); + group.jobMinCores = rs.getInt("int_job_min_cores"); + group.jobMaxGpus = rs.getInt("int_job_max_gpus"); + group.jobMinGpus = rs.getInt("int_job_min_gpus"); + group.jobPriority = rs.getInt("int_job_priority"); + group.minCores = rs.getInt("int_min_cores"); + group.maxCores = rs.getInt("int_max_cores"); + group.minGpus = rs.getInt("int_min_gpus"); + group.maxGpus = rs.getInt("int_max_gpus"); + group.name = rs.getString("str_name"); + group.parentId = rs.getString("pk_parent_folder"); + group.showId = rs.getString("pk_show"); + group.deptId = rs.getString("pk_dept"); + return group; } + }; + + private int childGroupCount(GroupInterface group) { + return getJdbcTemplate().queryForObject("SELECT COUNT(*) FROM folder WHERE pk_parent_folder=?", + Integer.class, group.getId()); + } + + private int childJobCount(GroupInterface group) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(*) FROM job WHERE pk_folder=? AND str_state=?", Integer.class, group.getId(), + JobState.PENDING.toString()); + } + + private void recurseParentChange(final String folderId, final String newParentId) { + getJdbcTemplate().call(new CallableStatementCreator() { + + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call recurse_folder_parent_change(?,?) }"); + c.setString(1, folderId); + c.setString(2, newParentId); + return c; + } + }, new ArrayList()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java index 3734ae803..13318a499 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.util.List; @@ -29,24 +25,19 @@ public class HistoricalDaoJdbc extends JdbcDaoSupport implements HistoricalDao { - private static final String GET_FINISHED_JOBS = - JobDaoJdbc.GET_JOB + - "WHERE " + - "job.str_state = ? " + - "AND " + - "current_timestamp - job.ts_stopped > "; - - public List getFinishedJobs(int cutoffHours) { - String interval = "interval '" + cutoffHours + "' hour"; - return getJdbcTemplate().query(GET_FINISHED_JOBS + interval, - JobDaoJdbc.JOB_MAPPER, JobState.FINISHED.toString()); - } - - public void transferJob(JobInterface job) { - /** - * All of the historical transfer happens inside of triggers - */ - getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", job.getJobId()); - } + private static final String GET_FINISHED_JOBS = JobDaoJdbc.GET_JOB + "WHERE " + + "job.str_state = ? " + "AND " + "current_timestamp - job.ts_stopped > "; + + public List getFinishedJobs(int cutoffHours) { + String interval = "interval '" + cutoffHours + "' hour"; + return getJdbcTemplate().query(GET_FINISHED_JOBS + interval, JobDaoJdbc.JOB_MAPPER, + JobState.FINISHED.toString()); + } + + public void transferJob(JobInterface job) { + /** + * All of the historical transfer happens inside of triggers + */ + getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", job.getJobId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java index 20fe7b1ef..77cc3b2d9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.net.InetAddress; @@ -57,703 +53,520 @@ import com.imageworks.spcue.util.CueUtil; import com.imageworks.spcue.util.SqlUtil; - public class HostDaoJdbc extends JdbcDaoSupport implements HostDao { - @Autowired - private Environment env; - - public static final RowMapper HOST_DETAIL_MAPPER = new RowMapper() { - public HostEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - HostEntity host = new HostEntity(); - host.facilityId = rs.getString("pk_facility"); - host.allocId = rs.getString("pk_alloc"); - host.id = rs.getString("pk_host"); - host.lockState = LockState.valueOf(rs.getString("str_lock_state")); - host.name = rs.getString("str_name"); - host.nimbyEnabled = rs.getBoolean("b_nimby"); - host.state = HardwareState.valueOf(rs.getString("str_state")); - host.unlockAtBoot = rs.getBoolean("b_unlock_boot"); - host.cores = rs.getInt("int_cores"); - host.idleCores = rs.getInt("int_cores_idle"); - host.memory = rs.getLong("int_mem"); - host.idleMemory = rs.getLong("int_mem_idle"); - host.gpus = rs.getInt("int_gpus"); - host.idleGpus = rs.getInt("int_gpus_idle"); - host.gpuMemory = rs.getLong("int_gpu_mem"); - host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); - host.dateBooted = rs.getDate("ts_booted"); - host.dateCreated = rs.getDate("ts_created"); - host.datePinged = rs.getDate("ts_ping"); - return host; - } - }; - - public static final RowMapper HOST_MAPPER = new RowMapper() { - public HostInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new HostInterface() { - final String id = rs.getString("pk_host"); - final String allocid = rs.getString("pk_alloc"); - final String name = rs.getString("str_name"); - final String facility = rs.getString("pk_facility"); - - public String getHostId() { return id; } - public String getAllocationId() { return allocid; } - public String getId() { return id; } - public String getName() { return name; } - public String getFacilityId() { return facility; }; - }; + @Autowired + private Environment env; + + public static final RowMapper HOST_DETAIL_MAPPER = new RowMapper() { + public HostEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + HostEntity host = new HostEntity(); + host.facilityId = rs.getString("pk_facility"); + host.allocId = rs.getString("pk_alloc"); + host.id = rs.getString("pk_host"); + host.lockState = LockState.valueOf(rs.getString("str_lock_state")); + host.name = rs.getString("str_name"); + host.nimbyEnabled = rs.getBoolean("b_nimby"); + host.state = HardwareState.valueOf(rs.getString("str_state")); + host.unlockAtBoot = rs.getBoolean("b_unlock_boot"); + host.cores = rs.getInt("int_cores"); + host.idleCores = rs.getInt("int_cores_idle"); + host.memory = rs.getLong("int_mem"); + host.idleMemory = rs.getLong("int_mem_idle"); + host.gpus = rs.getInt("int_gpus"); + host.idleGpus = rs.getInt("int_gpus_idle"); + host.gpuMemory = rs.getLong("int_gpu_mem"); + host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); + host.dateBooted = rs.getDate("ts_booted"); + host.dateCreated = rs.getDate("ts_created"); + host.datePinged = rs.getDate("ts_ping"); + return host; + } + }; + + public static final RowMapper HOST_MAPPER = new RowMapper() { + public HostInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { + return new HostInterface() { + final String id = rs.getString("pk_host"); + final String allocid = rs.getString("pk_alloc"); + final String name = rs.getString("str_name"); + final String facility = rs.getString("pk_facility"); 
+ + public String getHostId() { + return id; } - }; - - private static final String GET_HOST_DETAIL = - "SELECT " + - "host.pk_host, " + - "host.pk_alloc,"+ - "host.str_lock_state,"+ - "host.b_nimby,"+ - "host.b_unlock_boot,"+ - "host.int_cores,"+ - "host.int_cores_idle,"+ - "host.int_mem,"+ - "host.int_mem_idle,"+ - "host.int_gpus,"+ - "host.int_gpus_idle,"+ - "host.int_gpu_mem,"+ - "host.int_gpu_mem_idle,"+ - "host.ts_created,"+ - "host.str_name, " + - "host_stat.str_state,"+ - "host_stat.ts_ping,"+ - "host_stat.ts_booted, "+ - "alloc.pk_facility " + - "FROM " + - "host, " + - "alloc, " + - "host_stat " + - "WHERE " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc "; - - @Override - public void lockForUpdate(HostInterface host) { - try { - getJdbcTemplate().queryForObject( - "SELECT pk_host FROM host WHERE pk_host=? " + - "FOR UPDATE NOWAIT", - String.class, host.getHostId()); - } catch (Exception e) { - throw new ResourceReservationFailureException("unable to lock host " + - host.getName() + ", the host was locked by another thread.", e); - } - } - @Override - public HostEntity getHostDetail(HostInterface host) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", - HOST_DETAIL_MAPPER, host.getHostId()); - } - - @Override - public HostEntity getHostDetail(String id) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", - HOST_DETAIL_MAPPER, id); - } - - @Override - public HostEntity findHostDetail(String name) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.str_name=?", - HOST_DETAIL_MAPPER, name); - } - - private static final String GET_HOST= - "SELECT " + - "host.pk_host, " + - "host.pk_alloc,"+ - "host.str_name, " + - "alloc.pk_facility " + - "FROM " + - "host," + - "alloc " + - "WHERE " + - "host.pk_alloc = alloc.pk_alloc " ; - - @Override - public HostInterface getHost(String id) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", - HOST_MAPPER, id); - } - - @Override - public HostInterface getHost(LocalHostAssignment l) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host = ("+ - "SELECT pk_host FROM host_local WHERE pk_host_local=?)", - HOST_MAPPER, l.getId()); - } - - @Override - public HostInterface findHost(String name) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND (host.str_name=? 
OR host.str_fqdn=?)", - HOST_MAPPER, name, name); - } - - public static final RowMapper DISPATCH_HOST_MAPPER = - new RowMapper() { - public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchHost host = new DispatchHost(); - host.id = rs.getString("pk_host"); - host.allocationId = rs.getString("pk_alloc"); - host.facilityId = rs.getString("pk_facility"); - host.name = rs.getString("str_name"); - host.lockState = LockState.valueOf(rs.getString("str_lock_state")); - host.memory = rs.getLong("int_mem"); - host.cores = rs.getInt("int_cores"); - host.gpus = rs.getInt("int_gpus"); - host.gpuMemory = rs.getLong("int_gpu_mem"); - host.idleMemory= rs.getLong("int_mem_idle"); - host.idleCores = rs.getInt("int_cores_idle"); - host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); - host.idleGpus = rs.getInt("int_gpus_idle"); - host.isNimby = rs.getBoolean("b_nimby"); - host.threadMode = rs.getInt("int_thread_mode"); - host.tags = rs.getString("str_tags"); - host.setOs(rs.getString("str_os")); - host.hardwareState = - HardwareState.valueOf(rs.getString("str_state")); - return host; - } - }; - - public static final String GET_DISPATCH_HOST = - "SELECT " + - "host.pk_host,"+ - "host.pk_alloc,"+ - "host.str_name," + - "host.str_lock_state, " + - "host.int_cores, "+ - "host.int_cores_idle, " + - "host.int_mem,"+ - "host.int_mem_idle, "+ - "host.int_gpus, "+ - "host.int_gpus_idle, " + - "host.int_gpu_mem,"+ - "host.int_gpu_mem_idle, "+ - "host.b_nimby, "+ - "host.int_thread_mode, "+ - "host.str_tags, " + - "host_stat.str_os, " + - "host_stat.str_state, " + - "alloc.pk_facility " + - "FROM " + - "host " + - "INNER JOIN host_stat " + - "ON (host.pk_host = host_stat.pk_host) " + - "INNER JOIN alloc " + - "ON (host.pk_alloc = alloc.pk_alloc) "; - - @Override - public DispatchHost findDispatchHost(String name) { - try { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_HOST + - "WHERE (host.str_name=? 
OR host.str_fqdn=?)", - DISPATCH_HOST_MAPPER, name, name); - } catch (EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException( - "Failed to find host " + name, 1); + public String getAllocationId() { + return allocid; } - } - - @Override - public DispatchHost getDispatchHost(String id) { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_HOST + - "WHERE host.pk_host=?", - DISPATCH_HOST_MAPPER, id); - } - private static final String[] INSERT_HOST_DETAIL = - { - "INSERT INTO " + - "host " + - "("+ - "pk_host, " + - "pk_alloc, " + - "str_name, " + - "b_nimby, " + - "str_lock_state, " + - "int_procs,"+ - "int_cores, " + - "int_cores_idle, " + - "int_mem,"+ - "int_mem_idle,"+ - "int_gpus, " + - "int_gpus_idle, " + - "int_gpu_mem,"+ - "int_gpu_mem_idle,"+ - "str_fqdn, " + - "int_thread_mode "+ - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", - - "INSERT INTO " + - "host_stat " + - "("+ - "pk_host_stat," + - "pk_host,"+ - "int_mem_total, " + - "int_mem_free,"+ - "int_gpu_mem_total, " + - "int_gpu_mem_free,"+ - "int_swap_total, " + - "int_swap_free,"+ - "int_mcp_total, " + - "int_mcp_free,"+ - "int_load, " + - "ts_booted, " + - "str_state, " + - "str_os " + - ") "+ - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)" - - }; - - @Override - public void insertRenderHost(RenderHost host, AllocationInterface a, boolean useLongNames) { - - ThreadMode threadMode = ThreadMode.AUTO; - if (host.getNimbyEnabled()) { - threadMode = ThreadMode.ALL; + public String getId() { + return id; } - long memUnits = convertMemoryUnits(host); - long memReserverMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - if (memUnits < memReserverMin) { - throw new EntityCreationError("could not create host " + host.getName() + ", " + - " must have at least " + memReserverMin + " free memory."); + public String getName() { + return name; } - String fqdn; - String name = host.getName(); - try { - fqdn = InetAddress.getByName(host.getName()).getCanonicalHostName(); - // If the provided host name matches the pinged name, use the pinged name. - // Otherwise use the provided name. - // If the host lookup fails, use the provided name. - // In all cases attempt to strip off the domain when setting the name. 
- if (fqdn.equals(host.getName())) { - name = getHostNameFromFQDN(fqdn, useLongNames); - } - else { - name = getHostNameFromFQDN(host.getName(), useLongNames); - fqdn = host.getName(); - } - } catch (UnknownHostException e) { - logger.info(e); - fqdn = host.getName(); - name = getHostNameFromFQDN(name, useLongNames); - } - - String hid = SqlUtil.genKeyRandom(); - int coreUnits = host.getNumProcs() * host.getCoresPerProc(); - String os = host.getAttributesMap().get("SP_OS"); - if (os == null) { - os = Dispatcher.OS_DEFAULT; - } - - getJdbcTemplate().update(INSERT_HOST_DETAIL[0], - hid, a.getAllocationId(), name, host.getNimbyEnabled(), - LockState.OPEN.toString(), host.getNumProcs(), coreUnits, coreUnits, - memUnits, memUnits, - host.getNumGpus(), host.getNumGpus(), - host.getTotalGpuMem(), host.getTotalGpuMem(), - fqdn, threadMode.getNumber()); - - getJdbcTemplate().update(INSERT_HOST_DETAIL[1], - hid, hid, host.getTotalMem(), host.getFreeMem(), - host.getTotalGpuMem(), host.getFreeGpuMem(), - host.getTotalSwap(), host.getFreeSwap(), - host.getTotalMcp(), host.getFreeMcp(), - host.getLoad(), new Timestamp(host.getBootTime() * 1000l), - host.getState().toString(), os); - } - - @Override - public void recalcuateTags(final String id) { - getJdbcTemplate().call(new CallableStatementCreator() { - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recalculate_tags(?) }"); - c.setString(1, id); - return c; - } - }, new ArrayList()); - } - - private static final String UPDATE_RENDER_HOST = - "UPDATE " + - "host_stat " + - "SET " + - "int_mem_total = ?, " + - "int_mem_free = ?, " + - "int_swap_total = ?, " + - "int_swap_free = ?, "+ - "int_mcp_total = ?, " + - "int_mcp_free = ?, " + - "int_gpu_mem_total = ?, " + - "int_gpu_mem_free = ?, " + - "int_load = ?," + - "ts_booted = ?, " + - "ts_ping = current_timestamp, "+ - "str_os = ? " + - "WHERE " + - "pk_host = ?"; - - @Override - public void updateHostStats(HostInterface host, - long totalMemory, long freeMemory, - long totalSwap, long freeSwap, - long totalMcp, long freeMcp, - long totalGpuMemory, long freeGpuMemory, - int load, Timestamp bootTime, - String os) { - - if (os == null) { - os = Dispatcher.OS_DEFAULT; - } - - getJdbcTemplate().update(UPDATE_RENDER_HOST, - totalMemory, freeMemory, totalSwap, - freeSwap, totalMcp, freeMcp, totalGpuMemory, freeGpuMemory, load, - bootTime, os, host.getHostId()); - } - - @Override - public boolean hostExists(String hostname) { - try { - return getJdbcTemplate().queryForObject( - "SELECT 1 FROM host WHERE (str_fqdn=? OR str_name=?)", - Integer.class, hostname, hostname) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - @Override - public void updateHostResources(HostInterface host, HostReport report) { - - long memory = convertMemoryUnits(report.getHost()); - int cores = report.getHost().getNumProcs() * report.getHost().getCoresPerProc(); - long gpu_memory = report.getHost().getTotalGpuMem(); - int gpus = report.getHost().getNumGpus(); - - getJdbcTemplate().update( - "UPDATE " + - "host " + - "SET " + - "b_nimby=?,"+ - "int_cores=?," + - "int_cores_idle=?," + - "int_mem=?," + - "int_mem_idle=?, " + - "int_gpus=?," + - "int_gpus_idle=?," + - "int_gpu_mem=?," + - "int_gpu_mem_idle=? " + - "WHERE " + - "pk_host=? 
"+ - "AND " + - "int_cores = int_cores_idle " + - "AND " + - "int_mem = int_mem_idle " + - "AND " + - "int_gpus = int_gpus_idle", - report.getHost().getNimbyEnabled(), cores, cores, - memory, memory, gpus, gpus, gpu_memory, gpu_memory, host.getId()); - } - - @Override - public void updateHostLock(HostInterface host, LockState state, Source source) { - getJdbcTemplate().update( - "UPDATE host SET str_lock_state=?, str_lock_source=? WHERE pk_host=?", - state.toString(), source.toString(), host.getHostId()); - } - - @Override - public void updateHostRebootWhenIdle(HostInterface host, boolean enabled) { - getJdbcTemplate().update("UPDATE host SET b_reboot_idle=? WHERE pk_host=?", - enabled, host.getHostId()); - } - - @Override - public void deleteHost(HostInterface host) { - getJdbcTemplate().update( - "DELETE FROM comments WHERE pk_host=?",host.getHostId()); - getJdbcTemplate().update( - "DELETE FROM host WHERE pk_host=?",host.getHostId()); - } - - private static final String DELETE_DOWN_HOST_COMMENTS = - "DELETE " + - "FROM " + - "comments " + - "USING " + - "host_stat " + - "WHERE " + - "comments.pk_host = host_stat.pk_host " + - "AND " + - "host_stat.str_state = ?"; - - private static final String DELETE_DOWN_HOSTS = - "DELETE " + - "FROM " + - "host " + - "USING " + - "host_stat " + - "WHERE " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host_stat.str_state=?"; - - @Override - public void deleteDownHosts() { - getJdbcTemplate().update(DELETE_DOWN_HOST_COMMENTS, HardwareState.DOWN.toString()); - getJdbcTemplate().update(DELETE_DOWN_HOSTS, HardwareState.DOWN.toString()); - } - - @Override - public void updateHostState(HostInterface host, HardwareState state) { - getJdbcTemplate().update( - "UPDATE host_stat SET str_state=? WHERE pk_host=?", - state.toString(), host.getHostId()); - } - - @Override - public void updateHostFreeTempDir(HostInterface host, Long freeTempDir) { - getJdbcTemplate().update( - "UPDATE host_stat SET int_mcp_free=? WHERE pk_host=?", - freeTempDir, host.getHostId()); - } - - @Override - public void updateHostSetAllocation(HostInterface host, AllocationInterface alloc) { - - String tag = getJdbcTemplate().queryForObject( - "SELECT str_tag FROM alloc WHERE pk_alloc=?", - String.class, alloc.getAllocationId()); - getJdbcTemplate().update( - "UPDATE host SET pk_alloc=? WHERE pk_host=?", - alloc.getAllocationId(), host.getHostId()); - - removeTagsByType(host, HostTagType.ALLOC); - tagHost(host, tag, HostTagType.ALLOC); - } - - @Override - public boolean isHostLocked(HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE pk_host=? AND str_lock_state!=?", - Integer.class, host.getHostId(), LockState.OPEN.toString()) > 0; - } - - private static final String INSERT_TAG = - "INSERT INTO " + - "host_tag " + - "(" + - "pk_host_tag,"+ - "pk_host,"+ - "str_tag,"+ - "str_tag_type, " + - "b_constant " + - ") VALUES (?,?,?,?,?)"; - - - @Override - public void tagHost(String id, String tag, HostTagType type) { - boolean constant = false; - if (type.equals(HostTagType.ALLOC)) - constant = true; - - getJdbcTemplate().update(INSERT_TAG, - SqlUtil.genKeyRandom(), id, tag.trim(), type.toString(), constant); - } - - @Override - public void tagHost(HostInterface host, String tag, HostTagType type) { - tagHost(host.getHostId(), tag, type); - } - - @Override - public void removeTagsByType(HostInterface host, HostTagType type) { - getJdbcTemplate().update("DELETE FROM host_tag WHERE pk_host=? 
AND str_tag_type=?", - host.getHostId(), type.toString()); - } - - @Override - public void removeTag(HostInterface host, String tag) { - getJdbcTemplate().update( - "DELETE FROM host_tag WHERE pk_host=? AND str_tag=? AND b_constant=false", - host.getHostId(), tag); - } - - @Override - public void renameTag(HostInterface host, String oldTag, String newTag) { - getJdbcTemplate().update( - "UPDATE host_tag SET str_tag=? WHERE pk_host=? AND str_tag=? AND b_constant=false", - newTag, host.getHostId(), oldTag); - } - - @Override - public void updateThreadMode(HostInterface host, ThreadMode mode) { - getJdbcTemplate().update( - "UPDATE host SET int_thread_mode=? WHERE pk_host=?", - mode.getNumber(), host.getHostId()); - } - - @Override - public void updateHostOs(HostInterface host, String os) { - getJdbcTemplate().update( - "UPDATE host_stat SET str_os=? WHERE pk_host=?", - os, host.getHostId()); - } - - @Override - public int getStrandedCoreUnits(HostInterface h) { - try { - int idle_cores = getJdbcTemplate().queryForObject( - "SELECT int_cores_idle FROM host WHERE pk_host = ? AND int_mem_idle <= ?", - Integer.class, h.getHostId(), - Dispatcher.MEM_STRANDED_THRESHHOLD); - return (int) (Math.floor(idle_cores / 100.0)) * 100; - } catch (EmptyResultDataAccessException e) { - return 0; - } - } - - @Override - public int getStrandedGpus(HostInterface h) { - try { - int idle_gpus = getJdbcTemplate().queryForObject( - "SELECT int_gpus_idle FROM host WHERE pk_host = ?", - Integer.class, h.getHostId()); - return idle_gpus; - } catch (EmptyResultDataAccessException e) { - return 0; - } - } - - private static final String IS_HOST_UP = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host_stat "+ - "WHERE " + - "host_stat.str_state = ? " + - "AND " + - "host_stat.pk_host = ? "; - - @Override - public boolean isHostUp(HostInterface host) { - return getJdbcTemplate().queryForObject(IS_HOST_UP, - Integer.class, HardwareState.UP.toString(), - host.getHostId()) == 1; - } - - private static final String IS_PREFER_SHOW = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host," + - "owner," + - "deed "+ - "WHERE " + - "host.pk_host = deed.pk_host " + - "AND " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "host.pk_host = ?"; - - @Override - public boolean isPreferShow(HostInterface h) { - return getJdbcTemplate().queryForObject(IS_PREFER_SHOW, - Integer.class, h.getHostId()) > 0; - } - - @Override - public boolean isNimbyHost(HostInterface h) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE b_nimby=true AND pk_host=?", - Integer.class, h.getHostId()) > 0; - } - - /** - * Checks if the passed in name looks like a fully qualified domain name. - * If so, returns the hostname without the domain. Otherwise returns the passed - * in name unchanged. 
- * @param fqdn - String - * @return String - hostname + public String getFacilityId() { + return facility; + }; + }; + } + }; + + private static final String GET_HOST_DETAIL = "SELECT " + "host.pk_host, " + "host.pk_alloc," + + "host.str_lock_state," + "host.b_nimby," + "host.b_unlock_boot," + "host.int_cores," + + "host.int_cores_idle," + "host.int_mem," + "host.int_mem_idle," + "host.int_gpus," + + "host.int_gpus_idle," + "host.int_gpu_mem," + "host.int_gpu_mem_idle," + "host.ts_created," + + "host.str_name, " + "host_stat.str_state," + "host_stat.ts_ping," + "host_stat.ts_booted, " + + "alloc.pk_facility " + "FROM " + "host, " + "alloc, " + "host_stat " + "WHERE " + + "host.pk_host = host_stat.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc "; + + @Override + public void lockForUpdate(HostInterface host) { + try { + getJdbcTemplate().queryForObject( + "SELECT pk_host FROM host WHERE pk_host=? " + "FOR UPDATE NOWAIT", String.class, + host.getHostId()); + } catch (Exception e) { + throw new ResourceReservationFailureException( + "unable to lock host " + host.getName() + ", the host was locked by another thread.", e); + } + } + + @Override + public HostEntity getHostDetail(HostInterface host) { + return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", + HOST_DETAIL_MAPPER, host.getHostId()); + } + + @Override + public HostEntity getHostDetail(String id) { + return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", + HOST_DETAIL_MAPPER, id); + } + + @Override + public HostEntity findHostDetail(String name) { + return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.str_name=?", + HOST_DETAIL_MAPPER, name); + } + + private static final String GET_HOST = + "SELECT " + "host.pk_host, " + "host.pk_alloc," + "host.str_name, " + "alloc.pk_facility " + + "FROM " + "host," + "alloc " + "WHERE " + "host.pk_alloc = alloc.pk_alloc "; + + @Override + public HostInterface getHost(String id) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); + } + + @Override + public HostInterface getHost(LocalHostAssignment l) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host = (" + + "SELECT pk_host FROM host_local WHERE pk_host_local=?)", HOST_MAPPER, l.getId()); + } + + @Override + public HostInterface findHost(String name) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND (host.str_name=? 
OR host.str_fqdn=?)", + HOST_MAPPER, name, name); + } + + public static final RowMapper DISPATCH_HOST_MAPPER = new RowMapper() { + public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { + DispatchHost host = new DispatchHost(); + host.id = rs.getString("pk_host"); + host.allocationId = rs.getString("pk_alloc"); + host.facilityId = rs.getString("pk_facility"); + host.name = rs.getString("str_name"); + host.lockState = LockState.valueOf(rs.getString("str_lock_state")); + host.memory = rs.getLong("int_mem"); + host.cores = rs.getInt("int_cores"); + host.gpus = rs.getInt("int_gpus"); + host.gpuMemory = rs.getLong("int_gpu_mem"); + host.idleMemory = rs.getLong("int_mem_idle"); + host.idleCores = rs.getInt("int_cores_idle"); + host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); + host.idleGpus = rs.getInt("int_gpus_idle"); + host.isNimby = rs.getBoolean("b_nimby"); + host.threadMode = rs.getInt("int_thread_mode"); + host.tags = rs.getString("str_tags"); + host.setOs(rs.getString("str_os")); + host.hardwareState = HardwareState.valueOf(rs.getString("str_state")); + return host; + } + }; + + public static final String GET_DISPATCH_HOST = "SELECT " + "host.pk_host," + "host.pk_alloc," + + "host.str_name," + "host.str_lock_state, " + "host.int_cores, " + "host.int_cores_idle, " + + "host.int_mem," + "host.int_mem_idle, " + "host.int_gpus, " + "host.int_gpus_idle, " + + "host.int_gpu_mem," + "host.int_gpu_mem_idle, " + "host.b_nimby, " + + "host.int_thread_mode, " + "host.str_tags, " + "host_stat.str_os, " + + "host_stat.str_state, " + "alloc.pk_facility " + "FROM " + "host " + "INNER JOIN host_stat " + + "ON (host.pk_host = host_stat.pk_host) " + "INNER JOIN alloc " + + "ON (host.pk_alloc = alloc.pk_alloc) "; + + @Override + public DispatchHost findDispatchHost(String name) { + try { + return getJdbcTemplate().queryForObject( + GET_DISPATCH_HOST + "WHERE (host.str_name=? 
OR host.str_fqdn=?)", DISPATCH_HOST_MAPPER, + name, name); + } catch (EmptyResultDataAccessException e) { + throw new EmptyResultDataAccessException("Failed to find host " + name, 1); + } + } + + @Override + public DispatchHost getDispatchHost(String id) { + return getJdbcTemplate().queryForObject(GET_DISPATCH_HOST + "WHERE host.pk_host=?", + DISPATCH_HOST_MAPPER, id); + } + + private static final String[] INSERT_HOST_DETAIL = { + "INSERT INTO " + "host " + "(" + "pk_host, " + "pk_alloc, " + "str_name, " + "b_nimby, " + + "str_lock_state, " + "int_procs," + "int_cores, " + "int_cores_idle, " + "int_mem," + + "int_mem_idle," + "int_gpus, " + "int_gpus_idle, " + "int_gpu_mem," + + "int_gpu_mem_idle," + "str_fqdn, " + "int_thread_mode " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + + "INSERT INTO " + "host_stat " + "(" + "pk_host_stat," + "pk_host," + "int_mem_total, " + + "int_mem_free," + "int_gpu_mem_total, " + "int_gpu_mem_free," + "int_swap_total, " + + "int_swap_free," + "int_mcp_total, " + "int_mcp_free," + "int_load, " + "ts_booted, " + + "str_state, " + "str_os " + ") " + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)" + + }; + + @Override + public void insertRenderHost(RenderHost host, AllocationInterface a, boolean useLongNames) { + + ThreadMode threadMode = ThreadMode.AUTO; + if (host.getNimbyEnabled()) { + threadMode = ThreadMode.ALL; + } + + long memUnits = convertMemoryUnits(host); + long memReserverMin = env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + if (memUnits < memReserverMin) { + throw new EntityCreationError("could not create host " + host.getName() + ", " + + " must have at least " + memReserverMin + " free memory."); + } + + String fqdn; + String name = host.getName(); + try { + fqdn = InetAddress.getByName(host.getName()).getCanonicalHostName(); + // If the provided host name matches the pinged name, use the pinged name. + // Otherwise use the provided name. + // If the host lookup fails, use the provided name. + // In all cases attempt to strip off the domain when setting the name. + if (fqdn.equals(host.getName())) { + name = getHostNameFromFQDN(fqdn, useLongNames); + } else { + name = getHostNameFromFQDN(host.getName(), useLongNames); + fqdn = host.getName(); + } + } catch (UnknownHostException e) { + logger.info(e); + fqdn = host.getName(); + name = getHostNameFromFQDN(name, useLongNames); + } + + String hid = SqlUtil.genKeyRandom(); + int coreUnits = host.getNumProcs() * host.getCoresPerProc(); + String os = host.getAttributesMap().get("SP_OS"); + if (os == null) { + os = Dispatcher.OS_DEFAULT; + } + + getJdbcTemplate().update(INSERT_HOST_DETAIL[0], hid, a.getAllocationId(), name, + host.getNimbyEnabled(), LockState.OPEN.toString(), host.getNumProcs(), coreUnits, coreUnits, + memUnits, memUnits, host.getNumGpus(), host.getNumGpus(), host.getTotalGpuMem(), + host.getTotalGpuMem(), fqdn, threadMode.getNumber()); + + getJdbcTemplate().update(INSERT_HOST_DETAIL[1], hid, hid, host.getTotalMem(), host.getFreeMem(), + host.getTotalGpuMem(), host.getFreeGpuMem(), host.getTotalSwap(), host.getFreeSwap(), + host.getTotalMcp(), host.getFreeMcp(), host.getLoad(), + new Timestamp(host.getBootTime() * 1000l), host.getState().toString(), os); + } + + @Override + public void recalcuateTags(final String id) { + getJdbcTemplate().call(new CallableStatementCreator() { + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call recalculate_tags(?) 
}"); + c.setString(1, id); + return c; + } + }, new ArrayList()); + } + + private static final String UPDATE_RENDER_HOST = "UPDATE " + "host_stat " + "SET " + + "int_mem_total = ?, " + "int_mem_free = ?, " + "int_swap_total = ?, " + + "int_swap_free = ?, " + "int_mcp_total = ?, " + "int_mcp_free = ?, " + + "int_gpu_mem_total = ?, " + "int_gpu_mem_free = ?, " + "int_load = ?," + "ts_booted = ?, " + + "ts_ping = current_timestamp, " + "str_os = ? " + "WHERE " + "pk_host = ?"; + + @Override + public void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, + long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, int load, + Timestamp bootTime, String os) { + + if (os == null) { + os = Dispatcher.OS_DEFAULT; + } + + getJdbcTemplate().update(UPDATE_RENDER_HOST, totalMemory, freeMemory, totalSwap, freeSwap, + totalMcp, freeMcp, totalGpuMemory, freeGpuMemory, load, bootTime, os, host.getHostId()); + } + + @Override + public boolean hostExists(String hostname) { + try { + return getJdbcTemplate().queryForObject("SELECT 1 FROM host WHERE (str_fqdn=? OR str_name=?)", + Integer.class, hostname, hostname) > 0; + } catch (EmptyResultDataAccessException e) { + return false; + } + } + + @Override + public void updateHostResources(HostInterface host, HostReport report) { + + long memory = convertMemoryUnits(report.getHost()); + int cores = report.getHost().getNumProcs() * report.getHost().getCoresPerProc(); + long gpu_memory = report.getHost().getTotalGpuMem(); + int gpus = report.getHost().getNumGpus(); + + getJdbcTemplate().update( + "UPDATE " + "host " + "SET " + "b_nimby=?," + "int_cores=?," + "int_cores_idle=?," + + "int_mem=?," + "int_mem_idle=?, " + "int_gpus=?," + "int_gpus_idle=?," + + "int_gpu_mem=?," + "int_gpu_mem_idle=? " + "WHERE " + "pk_host=? " + "AND " + + "int_cores = int_cores_idle " + "AND " + "int_mem = int_mem_idle " + "AND " + + "int_gpus = int_gpus_idle", + report.getHost().getNimbyEnabled(), cores, cores, memory, memory, gpus, gpus, gpu_memory, + gpu_memory, host.getId()); + } + + @Override + public void updateHostLock(HostInterface host, LockState state, Source source) { + getJdbcTemplate().update("UPDATE host SET str_lock_state=?, str_lock_source=? WHERE pk_host=?", + state.toString(), source.toString(), host.getHostId()); + } + + @Override + public void updateHostRebootWhenIdle(HostInterface host, boolean enabled) { + getJdbcTemplate().update("UPDATE host SET b_reboot_idle=? WHERE pk_host=?", enabled, + host.getHostId()); + } + + @Override + public void deleteHost(HostInterface host) { + getJdbcTemplate().update("DELETE FROM comments WHERE pk_host=?", host.getHostId()); + getJdbcTemplate().update("DELETE FROM host WHERE pk_host=?", host.getHostId()); + } + + private static final String DELETE_DOWN_HOST_COMMENTS = + "DELETE " + "FROM " + "comments " + "USING " + "host_stat " + "WHERE " + + "comments.pk_host = host_stat.pk_host " + "AND " + "host_stat.str_state = ?"; + + private static final String DELETE_DOWN_HOSTS = + "DELETE " + "FROM " + "host " + "USING " + "host_stat " + "WHERE " + + "host.pk_host = host_stat.pk_host " + "AND " + "host_stat.str_state=?"; + + @Override + public void deleteDownHosts() { + getJdbcTemplate().update(DELETE_DOWN_HOST_COMMENTS, HardwareState.DOWN.toString()); + getJdbcTemplate().update(DELETE_DOWN_HOSTS, HardwareState.DOWN.toString()); + } + + @Override + public void updateHostState(HostInterface host, HardwareState state) { + getJdbcTemplate().update("UPDATE host_stat SET str_state=? 
WHERE pk_host=?", state.toString(), + host.getHostId()); + } + + @Override + public void updateHostFreeTempDir(HostInterface host, Long freeTempDir) { + getJdbcTemplate().update("UPDATE host_stat SET int_mcp_free=? WHERE pk_host=?", freeTempDir, + host.getHostId()); + } + + @Override + public void updateHostSetAllocation(HostInterface host, AllocationInterface alloc) { + + String tag = getJdbcTemplate().queryForObject("SELECT str_tag FROM alloc WHERE pk_alloc=?", + String.class, alloc.getAllocationId()); + getJdbcTemplate().update("UPDATE host SET pk_alloc=? WHERE pk_host=?", alloc.getAllocationId(), + host.getHostId()); + + removeTagsByType(host, HostTagType.ALLOC); + tagHost(host, tag, HostTagType.ALLOC); + } + + @Override + public boolean isHostLocked(HostInterface host) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host WHERE pk_host=? AND str_lock_state!=?", Integer.class, + host.getHostId(), LockState.OPEN.toString()) > 0; + } + + private static final String INSERT_TAG = "INSERT INTO " + "host_tag " + "(" + "pk_host_tag," + + "pk_host," + "str_tag," + "str_tag_type, " + "b_constant " + ") VALUES (?,?,?,?,?)"; + + @Override + public void tagHost(String id, String tag, HostTagType type) { + boolean constant = false; + if (type.equals(HostTagType.ALLOC)) + constant = true; + + getJdbcTemplate().update(INSERT_TAG, SqlUtil.genKeyRandom(), id, tag.trim(), type.toString(), + constant); + } + + @Override + public void tagHost(HostInterface host, String tag, HostTagType type) { + tagHost(host.getHostId(), tag, type); + } + + @Override + public void removeTagsByType(HostInterface host, HostTagType type) { + getJdbcTemplate().update("DELETE FROM host_tag WHERE pk_host=? AND str_tag_type=?", + host.getHostId(), type.toString()); + } + + @Override + public void removeTag(HostInterface host, String tag) { + getJdbcTemplate().update( + "DELETE FROM host_tag WHERE pk_host=? AND str_tag=? AND b_constant=false", host.getHostId(), + tag); + } + + @Override + public void renameTag(HostInterface host, String oldTag, String newTag) { + getJdbcTemplate().update( + "UPDATE host_tag SET str_tag=? WHERE pk_host=? AND str_tag=? AND b_constant=false", newTag, + host.getHostId(), oldTag); + } + + @Override + public void updateThreadMode(HostInterface host, ThreadMode mode) { + getJdbcTemplate().update("UPDATE host SET int_thread_mode=? WHERE pk_host=?", mode.getNumber(), + host.getHostId()); + } + + @Override + public void updateHostOs(HostInterface host, String os) { + getJdbcTemplate().update("UPDATE host_stat SET str_os=? WHERE pk_host=?", os, host.getHostId()); + } + + @Override + public int getStrandedCoreUnits(HostInterface h) { + try { + int idle_cores = getJdbcTemplate().queryForObject( + "SELECT int_cores_idle FROM host WHERE pk_host = ? AND int_mem_idle <= ?", Integer.class, + h.getHostId(), Dispatcher.MEM_STRANDED_THRESHHOLD); + return (int) (Math.floor(idle_cores / 100.0)) * 100; + } catch (EmptyResultDataAccessException e) { + return 0; + } + } + + @Override + public int getStrandedGpus(HostInterface h) { + try { + int idle_gpus = getJdbcTemplate().queryForObject( + "SELECT int_gpus_idle FROM host WHERE pk_host = ?", Integer.class, h.getHostId()); + return idle_gpus; + } catch (EmptyResultDataAccessException e) { + return 0; + } + } + + private static final String IS_HOST_UP = "SELECT " + "COUNT(1) " + "FROM " + "host_stat " + + "WHERE " + "host_stat.str_state = ? " + "AND " + "host_stat.pk_host = ? 
"; + + @Override + public boolean isHostUp(HostInterface host) { + return getJdbcTemplate().queryForObject(IS_HOST_UP, Integer.class, HardwareState.UP.toString(), + host.getHostId()) == 1; + } + + private static final String IS_PREFER_SHOW = "SELECT " + "COUNT(1) " + "FROM " + "host," + + "owner," + "deed " + "WHERE " + "host.pk_host = deed.pk_host " + "AND " + + "deed.pk_owner = owner.pk_owner " + "AND " + "host.pk_host = ?"; + + @Override + public boolean isPreferShow(HostInterface h) { + return getJdbcTemplate().queryForObject(IS_PREFER_SHOW, Integer.class, h.getHostId()) > 0; + } + + @Override + public boolean isNimbyHost(HostInterface h) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host WHERE b_nimby=true AND pk_host=?", Integer.class, + h.getHostId()) > 0; + } + + /** + * Checks if the passed in name looks like a fully qualified domain name. If so, returns the + * hostname without the domain. Otherwise returns the passed in name unchanged. + * + * @param fqdn - String + * @return String - hostname + */ + private String getHostNameFromFQDN(String fqdn, Boolean useLongNames) { + String hostName; + Pattern ipPattern = Pattern.compile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"); + Matcher ipMatcher = ipPattern.matcher(fqdn); + if (ipMatcher.matches()) { + hostName = fqdn; + } else if (fqdn.contains(":")) { + // looks like IPv6 address. + hostName = fqdn; + } else if (useLongNames) { + hostName = fqdn; + Pattern domainPattern = + Pattern.compile(".*(\\.(.*)\\.(co(m|.[a-z]{2})|biz|edu|info|net|org|cn|de|eu|nl))$"); + Matcher domainMatcher = domainPattern.matcher(fqdn); + if (domainMatcher.matches()) { + hostName = fqdn.replace(domainMatcher.group(1), ""); + } + } else { + hostName = fqdn.split("\\.")[0]; + } + return hostName; + + } + + /** + * Converts the amount of memory reported by the machine to a modificed value which takes into + * account the operating system and the possibility of user applications. + * + * @param host + * @return + */ + private long convertMemoryUnits(RenderHost host) { + + long memUnits; + if (host.getTagsList().contains("64bit")) { + memUnits = CueUtil.convertKbToFakeKb64bit(env, host.getTotalMem()); + } else { + memUnits = CueUtil.convertKbToFakeKb32bit(env, host.getTotalMem()); + } + + /* + * If this is a desktop, we'll just cut the memory so we don't annoy the user. */ - private String getHostNameFromFQDN(String fqdn, Boolean useLongNames) { - String hostName; - Pattern ipPattern = Pattern.compile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"); - Matcher ipMatcher = ipPattern.matcher(fqdn); - if (ipMatcher.matches()){ - hostName = fqdn; - } - else if (fqdn.contains(":")) { - // looks like IPv6 address. - hostName = fqdn; - } - else if (useLongNames) { - hostName = fqdn; - Pattern domainPattern = Pattern.compile( - ".*(\\.(.*)\\.(co(m|.[a-z]{2})|biz|edu|info|net|org|cn|de|eu|nl))$"); - Matcher domainMatcher = domainPattern.matcher(fqdn); - if (domainMatcher.matches()){ - hostName = fqdn.replace(domainMatcher.group(1), ""); - } - } - else { - hostName = fqdn.split("\\.")[0]; - } - return hostName; - + if (host.getNimbyEnabled()) { + long memReservedSystem = + env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); + memUnits = (long) (memUnits / 1.5) + memReservedSystem; } - /** - * Converts the amount of memory reported by the machine - * to a modificed value which takes into account the - * operating system and the possibility of user applications. 
- * - * @param host - * @return - */ - private long convertMemoryUnits(RenderHost host) { - - long memUnits; - if (host.getTagsList().contains("64bit")) { - memUnits = CueUtil.convertKbToFakeKb64bit(env, host.getTotalMem()); - } - else { - memUnits = CueUtil.convertKbToFakeKb32bit(env, host.getTotalMem()); - } - - /* - * If this is a desktop, we'll just cut the memory - * so we don't annoy the user. - */ - if (host.getNimbyEnabled()) { - long memReservedSystem = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_system", - Long.class); - memUnits = (long) (memUnits / 1.5) + memReservedSystem; - } - - return memUnits; - } + return memUnits; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java index 872ab41d7..5838cef1f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -60,1016 +56,760 @@ import com.imageworks.spcue.util.SqlUtil; public class JobDaoJdbc extends JdbcDaoSupport implements JobDao { - private static final Pattern LAST_JOB_STRIP_PATTERN = Pattern.compile("_v*([_0-9]*$)"); - - /* - * Maps a row to a DispatchJob object - */ - public static final RowMapper DISPATCH_JOB_MAPPER = new RowMapper() { - public DispatchJob mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchJob job = new DispatchJob(); - job.id = rs.getString("pk_job"); - job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_facility"); - job.name = rs.getString("str_name"); - job.state = JobState.valueOf(rs.getString("str_state")); - job.maxRetries = rs.getInt("int_max_retries"); - job.paused = rs.getBoolean("b_paused"); - job.autoEat = rs.getBoolean("b_autoeat"); - job.autoBook = rs.getBoolean("b_auto_book"); - job.autoUnbook = rs.getBoolean("b_auto_unbook"); - return job; - } - }; - - /* - * Maps a row to minimal job. 
- */ - public static final RowMapper JOB_MAPPER = new RowMapper() { - public JobInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new JobInterface() { - final String jobid = rs.getString("pk_job"); - final String showid = rs.getString("pk_show"); - final String name = rs.getString("str_name"); - final String facility = rs.getString("pk_facility"); - public String getJobId() { return jobid; } - public String getShowId() { return showid; } - public String getId() { return jobid; } - public String getName() { return name; } - public String getFacilityId() { return facility; } - }; - } - }; - - /* - * Maps a row to a JobDetail object - */ - private static final RowMapper JOB_DETAIL_MAPPER = - new RowMapper() { - public JobDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - JobDetail job = new JobDetail(); - job.id = rs.getString("pk_job"); - job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_facility"); - job.deptId = rs.getString("pk_dept"); - job.groupId = rs.getString("pk_folder"); - job.logDir = rs.getString("str_log_dir"); - job.maxCoreUnits = rs.getInt("int_max_cores"); - job.minCoreUnits = rs.getInt("int_min_cores"); - job.maxGpuUnits = rs.getInt("int_max_gpus"); - job.minGpuUnits = rs.getInt("int_min_gpus"); - job.name = rs.getString("str_name"); - job.priority = rs.getInt("int_priority"); - job.shot = rs.getString("str_shot"); - job.state = JobState.valueOf(rs.getString("str_state")); - int uid = rs.getInt("int_uid"); - job.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); - job.user = rs.getString("str_user"); - job.email = rs.getString("str_email"); - job.totalFrames = rs.getInt("int_frame_count"); - job.totalLayers = rs.getInt("int_layer_count"); - Timestamp startTime = rs.getTimestamp("ts_started"); - job.startTime = startTime != null ? (int) (startTime.getTime() / 1000) : 0; - Timestamp stopTime = rs.getTimestamp("ts_stopped"); - job.stopTime = stopTime != null ? 
(int) (stopTime.getTime() / 1000) : 0; - job.isPaused = rs.getBoolean("b_paused"); - job.maxRetries = rs.getInt("int_max_retries"); - job.showName = rs.getString("show_name"); - job.facilityName = rs.getString("facility_name"); - job.deptName = rs.getString("dept_name"); - return job; - } - }; - - private static final String GET_DISPATCH_JOB = - "SELECT " + - "job.pk_job, " + - "job.pk_facility, " + - "job.pk_show, " + - "job.str_name, "+ - "job.str_show, " + - "job.str_state, "+ - "job.b_paused, "+ - "job.int_max_retries, " + - "job.b_autoeat, " + - "job.b_auto_book,"+ - "job.b_auto_unbook " + - "FROM " + - "job "+ - "WHERE " + - "pk_job = ?"; - - @Override - public DispatchJob getDispatchJob(String uuid) { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_JOB, DISPATCH_JOB_MAPPER, uuid); - } - - private static final String IS_JOB_COMPLETE = - "SELECT " + - "SUM (" + - "int_waiting_count + " + - "int_running_count + " + - "int_dead_count + " + - "int_depend_count + " + - "int_checkpoint_count " + - ") " + - "FROM " + - "job_stat " + - "WHERE " + - "pk_job=?"; - - @Override - public boolean isJobComplete(JobInterface job) { - if (isLaunching(job)) { - return false; - } - return getJdbcTemplate().queryForObject(IS_JOB_COMPLETE, - Integer.class, job.getJobId()) == 0; - } - - public static final String GET_JOB= - "SELECT " + - "job.pk_job, "+ - "job.pk_show, "+ - "job.pk_dept,"+ - "job.pk_facility,"+ - "job.str_name " + - "FROM " + - "job "; - - private static final String GET_JOB_DETAIL = - "SELECT " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.pk_dept,"+ - "job.pk_folder,"+ - "job.str_log_dir,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_state,"+ - "job.int_uid,"+ - "job.str_user,"+ - "job.str_email,"+ - "job.int_frame_count,"+ - "job.int_layer_count,"+ - "job.ts_started,"+ - "job.ts_stopped,"+ - "job.b_paused,"+ - "job.int_max_retries,"+ - "job_resource.int_max_cores,"+ - "job_resource.int_min_cores,"+ - "job_resource.int_max_gpus,"+ - "job_resource.int_min_gpus,"+ - "job_resource.int_priority,"+ - "show.str_name AS show_name, " + - "dept.str_name AS dept_name, "+ - "facility.str_name AS facility_name "+ - "FROM " + - "job, " + - "job_resource, "+ - "show, " + - "dept, "+ - "facility "+ - "WHERE " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.pk_show = show.pk_show " + - "AND " + - "job.pk_dept = dept.pk_dept " + - "AND " + - "job.pk_facility = facility.pk_facility "; - - private static final String GET_JOB_BY_ID = - GET_JOB_DETAIL + "AND job.pk_job=?"; - - private static final String FIND_JOB_BY_NAME = - GET_JOB_DETAIL + "AND job.str_visible_name=? "; - - @Override - public JobDetail getJobDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_JOB_BY_ID, JOB_DETAIL_MAPPER, id); - } - - @Override - public JobDetail findLastJob(String name) { - Matcher matcher = LAST_JOB_STRIP_PATTERN.matcher(name); - name = matcher.replaceAll("%"); - - return getJdbcTemplate().queryForObject( - GET_JOB_DETAIL + " AND job.str_state = 'FINISHED' AND job.str_name LIKE ? 
" + - "ORDER BY job.ts_stopped LIMIT 1", JOB_DETAIL_MAPPER, name); - } - - @Override - public JobInterface getJob(String id) { - return getJdbcTemplate().queryForObject( - GET_JOB + " WHERE pk_job=?", JOB_MAPPER, id); - } - - - public static final String GET_JOBS_BY_TASK = - "SELECT " + - "job.pk_job, " + - "job.pk_show, " + - "job.pk_dept, " + - "job.pk_facility, " + - "job.str_name " + - "FROM " + - "job," + - "folder " + - "WHERE " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "folder.b_exclude_managed = false " + - "AND " + - "job.str_state = ? " + - "AND " + - "job.pk_dept = ? " + - "AND " + - "job.str_shot = ? " + - "ORDER BY "+ - "ts_started ASC "; - - @Override - public List getJobs(TaskEntity t) { - return getJdbcTemplate().query(GET_JOBS_BY_TASK, - JOB_MAPPER, JobState.PENDING.toString(), t.deptId, t.shot); - } - - @Override - public JobDetail findJobDetail(String name) { - return getJdbcTemplate().queryForObject( - FIND_JOB_BY_NAME, JOB_DETAIL_MAPPER, name); - } - - @Override - public JobInterface findJob(String name) { - return getJdbcTemplate().queryForObject( - GET_JOB + " WHERE job.str_visible_name=?", JOB_MAPPER, name); - } - - @Override - public List findJobs(ShowInterface show) { - return getJdbcTemplate().query( - GET_JOB_DETAIL + " AND job.pk_show=?", JOB_DETAIL_MAPPER, show.getShowId()); - } - - @Override - public List findJobs(GroupInterface group) { - return getJdbcTemplate().query( - GET_JOB_DETAIL + " AND job.pk_folder=?", JOB_DETAIL_MAPPER, group.getId()); - } - - @Override - public void deleteJob(JobInterface j) { - /* See trigger before_delete_job */ - getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", j.getId()); - } - - @Override - public void updatePriority(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updatePriority(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE job.pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMinCores(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMaxCores(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMinCores(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updateMaxCores(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updateMinGpus(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMaxGpus(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMinGpus(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? 
WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updateMaxGpus(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updatePaused(JobInterface j, boolean b) { - getJdbcTemplate().update("UPDATE job SET b_paused=? WHERE pk_job=?", - b, j.getJobId()); - } - - @Override - public void updateAutoEat(JobInterface j, boolean b) { - int maxRetries = 1; - if (b) { - maxRetries = 0; - } - getJdbcTemplate().update("UPDATE job SET b_autoeat=?, int_max_retries=? WHERE pk_job=?", - b, maxRetries, j.getJobId()); - } - - @Override - public void updateState(JobInterface job, JobState state) { - getJdbcTemplate().update("UPDATE job SET str_state=? WHERE pk_job=?", - state.toString(), job.getJobId()); - } - - @Override - public void updateLogPath(JobInterface job, String path) { - getJdbcTemplate().update("UPDATE job SET str_log_dir=? WHERE pk_job=?", - path, job.getJobId()); - } - - @Override - public void updateMaxRSS(JobInterface job, long value) { - getJdbcTemplate().update( - "UPDATE job_mem SET int_max_rss=? WHERE pk_job=? AND int_max_rss < ?", - value, job.getJobId(), value); - } - - private static final String UPDATE_JOB_FINISHED = - "UPDATE " + - "job " + - "SET " + - "str_state = ?, "+ - "str_visible_name = NULL, " + - "ts_stopped = current_timestamp "+ - "WHERE " + - "str_state = 'PENDING' " + - "AND " + - "pk_job = ?"; - - @Override - public boolean updateJobFinished(JobInterface job) { - // Only return true if this thread was the one who actually - // set the job state to finished. - if(getJdbcTemplate().update(UPDATE_JOB_FINISHED, - JobState.FINISHED.toString(), job.getJobId()) == 1) { - return true; - } - return false; - } - - private static final String INSERT_JOB = - "INSERT INTO " + - "job " + - "(" + - "pk_job," + - "pk_show," + - "pk_folder,"+ - "pk_facility,"+ - "pk_dept,"+ - "str_name," + - "str_visible_name,"+ - "str_show,"+ - "str_shot," + - "str_user," + - "str_email,"+ - "str_state," + - "str_log_dir," + - "str_os, "+ - "int_uid," + - "b_paused," + - "b_autoeat,"+ - "int_max_retries " + - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertJob(JobDetail j, JobLogUtil jobLogUtil) { - j.id = SqlUtil.genKeyRandom(); - j.logDir = jobLogUtil.getJobLogPath(j); - if (j.minCoreUnits < 100) { j.minCoreUnits = 100; } - - getJdbcTemplate().update(INSERT_JOB, - j.id, j.showId, j.groupId, j.facilityId, j.deptId, - j.name, j.name, j.showName, j.shot, j.user, j.email, j.state.toString(), - j.logDir, j.os, j.uid.orElse(null), j.isPaused, j.isAutoEat, j.maxRetries); - } - - private static final String JOB_EXISTS = - "SELECT " + - "1 " + - "FROM " + - "job " + - "WHERE " + - "str_name = ? " + - "AND " + - "str_state='PENDING' " + - "LIMIT 1"; - - @Override - public boolean exists(String name) { - try { - return (getJdbcTemplate().queryForObject(JOB_EXISTS, - Integer.class, name) >= 1); - } catch (Exception e) { - return false; - } - } - - private static final String IS_LAUNCHING = - "SELECT " + - "str_state " + - "FROM " + - "job " + - "WHERE " + - "pk_job=?"; - - @Override - public boolean isLaunching(JobInterface j) { - return getJdbcTemplate().queryForObject( - IS_LAUNCHING, String.class, j.getJobId()).equals( - JobState.STARTUP.toString()); - } - - @Override - public void activateJob(JobInterface job, JobState jobState) { - - Long[] jobTotals = { 0L, 0L }; // Depend, Waiting - - /* - * Sets all frames in the setup state to Waiting. 
Frames with a depend - * count > 0 are automatically updated to Depend via the - * update_frame_wait_to_dep trigger. - */ - getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_job=? AND str_state=?", - FrameState.WAITING.toString(), job.getId(), FrameState.SETUP.toString()); - - List> layers = getJdbcTemplate().queryForList( - "SELECT pk_layer, str_state, count(1) AS c FROM frame " + - "WHERE pk_job=? GROUP BY pk_layer, str_state", job.getId()); - - for (Map row: layers) { - String layer = (String) row.get("pk_layer"); - FrameState state = FrameState.valueOf((String) row.get("str_state")); - Long count = (Long) row.get("c"); - - if (count == 0 || state == null) { continue; } - - switch (state) { - case DEPEND: - jobTotals[0] = jobTotals[0] + count; - getJdbcTemplate().update( - "UPDATE layer_stat SET int_depend_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", - count, count, layer); - break; - case WAITING: - jobTotals[1] = jobTotals[1] + count; - getJdbcTemplate().update( - "UPDATE layer_stat SET int_waiting_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", - count, count, layer); - break; - } + private static final Pattern LAST_JOB_STRIP_PATTERN = Pattern.compile("_v*([_0-9]*$)"); + + /* + * Maps a row to a DispatchJob object + */ + public static final RowMapper DISPATCH_JOB_MAPPER = new RowMapper() { + public DispatchJob mapRow(ResultSet rs, int rowNum) throws SQLException { + DispatchJob job = new DispatchJob(); + job.id = rs.getString("pk_job"); + job.showId = rs.getString("pk_show"); + job.facilityId = rs.getString("pk_facility"); + job.name = rs.getString("str_name"); + job.state = JobState.valueOf(rs.getString("str_state")); + job.maxRetries = rs.getInt("int_max_retries"); + job.paused = rs.getBoolean("b_paused"); + job.autoEat = rs.getBoolean("b_autoeat"); + job.autoBook = rs.getBoolean("b_auto_book"); + job.autoUnbook = rs.getBoolean("b_auto_unbook"); + return job; + } + }; + + /* + * Maps a row to minimal job. + */ + public static final RowMapper JOB_MAPPER = new RowMapper() { + public JobInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { + return new JobInterface() { + final String jobid = rs.getString("pk_job"); + final String showid = rs.getString("pk_show"); + final String name = rs.getString("str_name"); + final String facility = rs.getString("pk_facility"); + + public String getJobId() { + return jobid; } - getJdbcTemplate().update( - "UPDATE job_stat SET int_depend_count=?,int_waiting_count=? WHERE pk_job=?", - jobTotals[0], jobTotals[1], job.getJobId()); - - getJdbcTemplate().update( - "UPDATE job SET int_frame_count=?, int_layer_count=? 
WHERE pk_job=?", - jobTotals[0] + jobTotals[1], layers.size(), job.getJobId()); - - getJdbcTemplate().update( - "UPDATE show_stats SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", - jobTotals[0] + jobTotals[1], job.getShowId()); - - updateState(job, jobState); - } - - private static final String HAS_PENDING_FRAMES = - "SELECT " + - "int_waiting_count " + - "FROM " + - "job,"+ - "job_stat " + - "WHERE " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "job.b_paused = false " + - "AND " + - "job.b_auto_book = true " + - "AND " + - "job.pk_job = ?"; - - @Override - public boolean hasPendingFrames(JobInterface job) { - try { - return getJdbcTemplate().queryForObject(HAS_PENDING_FRAMES, - Integer.class, job.getJobId()) > 0; - } catch (DataAccessException e) { - return false; + public String getShowId() { + return showid; } - } - - private static final String IS_JOB_OVER_MIN_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_cores > job_resource.int_min_cores"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MIN_CORES, - Integer.class, job.getJobId()) > 0; - } - private static final String IS_JOB_OVER_MAX_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_cores + ? > job_resource.int_max_cores"; - - @Override - public boolean isOverMaxCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, - Integer.class, job.getJobId(), 0) > 0; - } - - @Override - public boolean isOverMaxCores(JobInterface job, int coreUnits) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, - Integer.class, job.getJobId(), coreUnits) > 0; - } - - - private static final String IS_JOB_AT_MAX_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_cores >= job_resource.int_max_cores "; - - @Override - public boolean isAtMaxCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_CORES, - Integer.class, job.getJobId()) > 0; - } - - private static final String IS_JOB_OVER_MAX_GPUS = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_gpus + ? > job_resource.int_max_gpus"; - - @Override - public boolean isOverMaxGpus(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, - Integer.class, job.getJobId(), 0) > 0; - } - - @Override - public boolean isOverMaxGpus(JobInterface job, int gpu) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, - Integer.class, job.getJobId(), gpu) > 0; - } - - private static final String IS_JOB_AT_MAX_GPUS = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? 
" + - "AND " + - "job_resource.int_gpus >= job_resource.int_max_gpus "; - - @Override - public boolean isAtMaxGpus(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_GPUS, - Integer.class, job.getJobId()) > 0; - } - - @Override - public void updateMaxFrameRetries(JobInterface j, int max_retries) { - if (max_retries < 0) { - throw new IllegalArgumentException("max retries must be greater than 0"); + public String getId() { + return jobid; } - int max_max_retries = getJdbcTemplate().queryForObject( - "SELECT int_value FROM config WHERE str_key=?", - Integer.class, "MAX_FRAME_RETRIES"); - - if (max_retries > max_max_retries) { - throw new IllegalArgumentException("max retries must be less than " - + max_max_retries); + public String getName() { + return name; } - getJdbcTemplate().update( - "UPDATE job SET int_max_retries=? WHERE pk_job=?", - max_retries, j.getJobId()); - } - - private static final String GET_FRAME_STATE_TOTALS = - "SELECT " + - "job.int_frame_count," + - "job_stat.* " + - "FROM " + - "job," + - "job_stat " + - "WHERE " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.pk_job=?"; - - public FrameStateTotals getFrameStateTotals(JobInterface job) { - return getJdbcTemplate().queryForObject( - GET_FRAME_STATE_TOTALS, - new RowMapper() { - public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameStateTotals t = new FrameStateTotals(); - t.dead = rs.getInt("int_dead_count"); - t.depend = rs.getInt("int_depend_count"); - t.eaten = rs.getInt("int_eaten_count"); - t.running = rs.getInt("int_running_count"); - t.succeeded = rs.getInt("int_succeeded_count"); - t.waiting = rs.getInt("int_waiting_count"); - t.total = rs.getInt("int_frame_count"); - return t; - } - },job.getJobId()); - } - - private static final String GET_EXECUTION_SUMMARY = - "SELECT " + - "job_usage.int_core_time_success,"+ - "job_usage.int_core_time_fail,"+ - "job_usage.int_gpu_time_success,"+ - "job_usage.int_gpu_time_fail,"+ - "job_mem.int_max_rss " + - "FROM " + - "job," + - "job_usage, "+ - "job_mem " + - "WHERE " + - "job.pk_job = job_usage.pk_job "+ - "AND " + - "job.pk_job = job_mem.pk_job " + - "AND " + - "job.pk_job = ?"; - - public ExecutionSummary getExecutionSummary(JobInterface job) { - return getJdbcTemplate().queryForObject( - GET_EXECUTION_SUMMARY, - new RowMapper() { - public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { - ExecutionSummary e = new ExecutionSummary(); - e.coreTimeSuccess = rs.getLong("int_core_time_success"); - e.coreTimeFail = rs.getLong("int_core_time_fail"); - e.coreTime = e.coreTimeSuccess + e.coreTimeFail; - e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); - e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); - e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; - e.highMemoryKb = rs.getLong("int_max_rss"); - - return e; - } - }, job.getJobId()); - } - - private static final String INSERT_JOB_ENV = - "INSERT INTO " + - "job_env " + - "(" + - "pk_job_env, pk_job, str_key, str_value " + - ") " + - "VALUES (?,?,?,?)"; - - @Override - public void insertEnvironment(JobInterface job, Map env) { - for (Map.Entry e: env.entrySet()) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_ENV, - pk, job.getJobId(), e.getKey(), e.getValue()); + public String getFacilityId() { + return facility; } - } - - @Override - public void insertEnvironment(JobInterface job, String key, String value) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_ENV, - pk, 
job.getJobId(), key, value); - } - - @Override - public Map getEnvironment(JobInterface job) { - Map result = new HashMap(); - - List> _result = getJdbcTemplate().queryForList( - "SELECT str_key, str_value FROM job_env WHERE pk_job=?", job.getJobId()); - - for (Map o: _result) { - result.put((String) o.get("str_key"), (String) o.get("str_value")); - } - - return result; - } + }; + } + }; + + /* + * Maps a row to a JobDetail object + */ + private static final RowMapper JOB_DETAIL_MAPPER = new RowMapper() { + public JobDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + JobDetail job = new JobDetail(); + job.id = rs.getString("pk_job"); + job.showId = rs.getString("pk_show"); + job.facilityId = rs.getString("pk_facility"); + job.deptId = rs.getString("pk_dept"); + job.groupId = rs.getString("pk_folder"); + job.logDir = rs.getString("str_log_dir"); + job.maxCoreUnits = rs.getInt("int_max_cores"); + job.minCoreUnits = rs.getInt("int_min_cores"); + job.maxGpuUnits = rs.getInt("int_max_gpus"); + job.minGpuUnits = rs.getInt("int_min_gpus"); + job.name = rs.getString("str_name"); + job.priority = rs.getInt("int_priority"); + job.shot = rs.getString("str_shot"); + job.state = JobState.valueOf(rs.getString("str_state")); + int uid = rs.getInt("int_uid"); + job.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); + job.user = rs.getString("str_user"); + job.email = rs.getString("str_email"); + job.totalFrames = rs.getInt("int_frame_count"); + job.totalLayers = rs.getInt("int_layer_count"); + Timestamp startTime = rs.getTimestamp("ts_started"); + job.startTime = startTime != null ? (int) (startTime.getTime() / 1000) : 0; + Timestamp stopTime = rs.getTimestamp("ts_stopped"); + job.stopTime = stopTime != null ? (int) (stopTime.getTime() / 1000) : 0; + job.isPaused = rs.getBoolean("b_paused"); + job.maxRetries = rs.getInt("int_max_retries"); + job.showName = rs.getString("show_name"); + job.facilityName = rs.getString("facility_name"); + job.deptName = rs.getString("dept_name"); + return job; + } + }; + + private static final String GET_DISPATCH_JOB = "SELECT " + "job.pk_job, " + "job.pk_facility, " + + "job.pk_show, " + "job.str_name, " + "job.str_show, " + "job.str_state, " + "job.b_paused, " + + "job.int_max_retries, " + "job.b_autoeat, " + "job.b_auto_book," + "job.b_auto_unbook " + + "FROM " + "job " + "WHERE " + "pk_job = ?"; + + @Override + public DispatchJob getDispatchJob(String uuid) { + return getJdbcTemplate().queryForObject(GET_DISPATCH_JOB, DISPATCH_JOB_MAPPER, uuid); + } + + private static final String IS_JOB_COMPLETE = "SELECT " + "SUM (" + "int_waiting_count + " + + "int_running_count + " + "int_dead_count + " + "int_depend_count + " + + "int_checkpoint_count " + ") " + "FROM " + "job_stat " + "WHERE " + "pk_job=?"; + + @Override + public boolean isJobComplete(JobInterface job) { + if (isLaunching(job)) { + return false; + } + return getJdbcTemplate().queryForObject(IS_JOB_COMPLETE, Integer.class, job.getJobId()) == 0; + } + + public static final String GET_JOB = "SELECT " + "job.pk_job, " + "job.pk_show, " + "job.pk_dept," + + "job.pk_facility," + "job.str_name " + "FROM " + "job "; + + private static final String GET_JOB_DETAIL = "SELECT " + "job.pk_job," + "job.pk_show," + + "job.pk_facility," + "job.pk_dept," + "job.pk_folder," + "job.str_log_dir," + + "job.str_name," + "job.str_shot," + "job.str_state," + "job.int_uid," + "job.str_user," + + "job.str_email," + "job.int_frame_count," + "job.int_layer_count," + "job.ts_started," + + "job.ts_stopped," + "job.b_paused," 
+ "job.int_max_retries," + "job_resource.int_max_cores," + + "job_resource.int_min_cores," + "job_resource.int_max_gpus," + "job_resource.int_min_gpus," + + "job_resource.int_priority," + "show.str_name AS show_name, " + + "dept.str_name AS dept_name, " + "facility.str_name AS facility_name " + "FROM " + "job, " + + "job_resource, " + "show, " + "dept, " + "facility " + "WHERE " + + "job.pk_job = job_resource.pk_job " + "AND " + "job.pk_show = show.pk_show " + "AND " + + "job.pk_dept = dept.pk_dept " + "AND " + "job.pk_facility = facility.pk_facility "; + + private static final String GET_JOB_BY_ID = GET_JOB_DETAIL + "AND job.pk_job=?"; + + private static final String FIND_JOB_BY_NAME = GET_JOB_DETAIL + "AND job.str_visible_name=? "; + + @Override + public JobDetail getJobDetail(String id) { + return getJdbcTemplate().queryForObject(GET_JOB_BY_ID, JOB_DETAIL_MAPPER, id); + } + + @Override + public JobDetail findLastJob(String name) { + Matcher matcher = LAST_JOB_STRIP_PATTERN.matcher(name); + name = matcher.replaceAll("%"); + + return getJdbcTemplate() + .queryForObject(GET_JOB_DETAIL + " AND job.str_state = 'FINISHED' AND job.str_name LIKE ? " + + "ORDER BY job.ts_stopped LIMIT 1", JOB_DETAIL_MAPPER, name); + } + + @Override + public JobInterface getJob(String id) { + return getJdbcTemplate().queryForObject(GET_JOB + " WHERE pk_job=?", JOB_MAPPER, id); + } + + public static final String GET_JOBS_BY_TASK = "SELECT " + "job.pk_job, " + "job.pk_show, " + + "job.pk_dept, " + "job.pk_facility, " + "job.str_name " + "FROM " + "job," + "folder " + + "WHERE " + "job.pk_folder = folder.pk_folder " + "AND " + + "folder.b_exclude_managed = false " + "AND " + "job.str_state = ? " + "AND " + + "job.pk_dept = ? " + "AND " + "job.str_shot = ? " + "ORDER BY " + "ts_started ASC "; + + @Override + public List getJobs(TaskEntity t) { + return getJdbcTemplate().query(GET_JOBS_BY_TASK, JOB_MAPPER, JobState.PENDING.toString(), + t.deptId, t.shot); + } + + @Override + public JobDetail findJobDetail(String name) { + return getJdbcTemplate().queryForObject(FIND_JOB_BY_NAME, JOB_DETAIL_MAPPER, name); + } + + @Override + public JobInterface findJob(String name) { + return getJdbcTemplate().queryForObject(GET_JOB + " WHERE job.str_visible_name=?", JOB_MAPPER, + name); + } + + @Override + public List findJobs(ShowInterface show) { + return getJdbcTemplate().query(GET_JOB_DETAIL + " AND job.pk_show=?", JOB_DETAIL_MAPPER, + show.getShowId()); + } + + @Override + public List findJobs(GroupInterface group) { + return getJdbcTemplate().query(GET_JOB_DETAIL + " AND job.pk_folder=?", JOB_DETAIL_MAPPER, + group.getId()); + } + + @Override + public void deleteJob(JobInterface j) { + /* See trigger before_delete_job */ + getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", j.getId()); + } + + @Override + public void updatePriority(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updatePriority(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE job.pk_folder=?)", v, g.getGroupId()); + } + + @Override + public void updateMinCores(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? 
WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); + } + + @Override + public void updateMaxCores(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); + } + + @Override + public void updateMinCores(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updateMaxCores(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updateMinGpus(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); + } + + @Override + public void updateMaxGpus(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); + } + + @Override + public void updateMinGpus(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updateMaxGpus(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updatePaused(JobInterface j, boolean b) { + getJdbcTemplate().update("UPDATE job SET b_paused=? WHERE pk_job=?", b, j.getJobId()); + } + + @Override + public void updateAutoEat(JobInterface j, boolean b) { + int maxRetries = 1; + if (b) { + maxRetries = 0; + } + getJdbcTemplate().update("UPDATE job SET b_autoeat=?, int_max_retries=? WHERE pk_job=?", b, + maxRetries, j.getJobId()); + } + + @Override + public void updateState(JobInterface job, JobState state) { + getJdbcTemplate().update("UPDATE job SET str_state=? WHERE pk_job=?", state.toString(), + job.getJobId()); + } + + @Override + public void updateLogPath(JobInterface job, String path) { + getJdbcTemplate().update("UPDATE job SET str_log_dir=? WHERE pk_job=?", path, job.getJobId()); + } + + @Override + public void updateMaxRSS(JobInterface job, long value) { + getJdbcTemplate().update("UPDATE job_mem SET int_max_rss=? WHERE pk_job=? AND int_max_rss < ?", + value, job.getJobId(), value); + } + + private static final String UPDATE_JOB_FINISHED = "UPDATE " + "job " + "SET " + "str_state = ?, " + + "str_visible_name = NULL, " + "ts_stopped = current_timestamp " + "WHERE " + + "str_state = 'PENDING' " + "AND " + "pk_job = ?"; + + @Override + public boolean updateJobFinished(JobInterface job) { + // Only return true if this thread was the one who actually + // set the job state to finished. 
+ if (getJdbcTemplate().update(UPDATE_JOB_FINISHED, JobState.FINISHED.toString(), + job.getJobId()) == 1) { + return true; + } + return false; + } + + private static final String INSERT_JOB = "INSERT INTO " + "job " + "(" + "pk_job," + "pk_show," + + "pk_folder," + "pk_facility," + "pk_dept," + "str_name," + "str_visible_name," + "str_show," + + "str_shot," + "str_user," + "str_email," + "str_state," + "str_log_dir," + "str_os, " + + "int_uid," + "b_paused," + "b_autoeat," + "int_max_retries " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertJob(JobDetail j, JobLogUtil jobLogUtil) { + j.id = SqlUtil.genKeyRandom(); + j.logDir = jobLogUtil.getJobLogPath(j); + if (j.minCoreUnits < 100) { + j.minCoreUnits = 100; + } + + getJdbcTemplate().update(INSERT_JOB, j.id, j.showId, j.groupId, j.facilityId, j.deptId, j.name, + j.name, j.showName, j.shot, j.user, j.email, j.state.toString(), j.logDir, j.os, + j.uid.orElse(null), j.isPaused, j.isAutoEat, j.maxRetries); + } + + private static final String JOB_EXISTS = "SELECT " + "1 " + "FROM " + "job " + "WHERE " + + "str_name = ? " + "AND " + "str_state='PENDING' " + "LIMIT 1"; + + @Override + public boolean exists(String name) { + try { + return (getJdbcTemplate().queryForObject(JOB_EXISTS, Integer.class, name) >= 1); + } catch (Exception e) { + return false; + } + } + + private static final String IS_LAUNCHING = + "SELECT " + "str_state " + "FROM " + "job " + "WHERE " + "pk_job=?"; + + @Override + public boolean isLaunching(JobInterface j) { + return getJdbcTemplate().queryForObject(IS_LAUNCHING, String.class, j.getJobId()) + .equals(JobState.STARTUP.toString()); + } + + @Override + public void activateJob(JobInterface job, JobState jobState) { + + Long[] jobTotals = {0L, 0L}; // Depend, Waiting - @Override - public void updateParent(JobInterface job, GroupDetail dest) { - updateParent(job, dest, new Inherit[] { Inherit.All }); - } - - @Override - public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) { - - if (!job.getShowId().equals(dest.getShowId())) { - throw new EntityModificationError("error moving job, " + - "cannot move jobs between shows"); - } - - StringBuilder query = new StringBuilder(1024); - query.append("UPDATE job_resource SET "); - List values= new ArrayList(); - - Set inheritSet = new HashSet(inherits.length); - inheritSet.addAll(Arrays.asList(inherits)); - - for (Inherit i: inheritSet) { - switch(i) { - case Priority: - if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { - query.append("int_priority=?,"); - values.add(dest.jobPriority); - } - break; - - case MinCores: - if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { - query.append("int_min_cores=?,"); - values.add(dest.jobMinCores); - } - break; - - case MaxCores: - if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { - query.append("int_max_cores=?,"); - values.add(dest.jobMaxCores); - } - break; - - case MinGpus: - if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_min_gpus=?,"); - values.add(dest.jobMinGpus); - } - break; - - case MaxGpus: - if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_max_gpus=?,"); - values.add(dest.jobMaxGpus); - } - break; - - case All: - if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { - query.append("int_priority=?,"); - values.add(dest.jobPriority); - } - - if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { - query.append("int_min_cores=?,"); - values.add(dest.jobMinCores); - } - - if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { - 
query.append("int_max_cores=?,"); - values.add(dest.jobMaxCores); - } - - if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_min_gpus=?,"); - values.add(dest.jobMinGpus); - } - - if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_max_gpus=?,"); - values.add(dest.jobMaxGpus); - } - break; - } - } - - getJdbcTemplate().update( - "UPDATE job SET pk_folder=?, pk_dept=? WHERE pk_job=?", - dest.getGroupId(), dest.getDepartmentId(), job.getJobId()); - - getJdbcTemplate().update( - "UPDATE job_history SET pk_dept=? WHERE pk_job=?", - dest.getDepartmentId(), job.getJobId()); - - if (values.size() > 0) { - query.deleteCharAt(query.length()-1); - query.append(" WHERE pk_job=?"); - values.add(job.getJobId()); - getJdbcTemplate().update(query.toString(), values.toArray()); - } - } - - private static final String HAS_PENDING_JOBS = - "SELECT " + - "job.pk_job " + - "FROM " + - "job, " + - "job_stat, " + - "job_resource " + - "WHERE " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "job.b_paused = false " + - "AND " + - "job.b_auto_book = true " + - "AND " + - "job_stat.int_waiting_count != 0 " + - "AND " + - "job_resource.int_cores < job_resource.int_max_cores " + - "AND " + - "job_resource.int_gpus < job_resource.int_max_gpus " + - "AND " + - "job.pk_facility = ? " + - "LIMIT 1"; - - @Override - public boolean cueHasPendingJobs(FacilityInterface f) { - return getJdbcTemplate().queryForList( - HAS_PENDING_JOBS, f.getFacilityId()).size() > 0; - } - - @Override - public void enableAutoBooking(JobInterface job, boolean value) { - getJdbcTemplate().update( - "UPDATE job SET b_auto_book=? WHERE pk_job=?", value, job.getJobId()); - } - - @Override - public void enableAutoUnBooking(JobInterface job, boolean value) { - getJdbcTemplate().update( - "UPDATE job SET b_auto_unbook=? WHERE pk_job=?", value, job.getJobId()); - } - - public static final String MAP_POST_JOB = - "INSERT INTO " + - "job_post " + - "(pk_job_post, pk_job, pk_post_job) " + - "VALUES (?,?,?)"; - - @Override - public void mapPostJob(BuildableJob job) { - getJdbcTemplate().update(MAP_POST_JOB, - SqlUtil.genKeyRandom(), job.detail.id, job.getPostJob().detail.id); - } - - public static final String ACTIVATE_POST_JOB = - "UPDATE " + - "job " + - "SET " + - "str_state=? " + - "WHERE " + - "pk_job IN (SELECT pk_post_job FROM job_post WHERE pk_job = ?)"; - - @Override - public void activatePostJob(JobInterface job) { - getJdbcTemplate().update(ACTIVATE_POST_JOB, - JobState.PENDING.toString(), job.getJobId()); - getJdbcTemplate().update("DELETE FROM job_post WHERE pk_job=?",job.getJobId()); - } - - @Override - public void updateDepartment(GroupInterface group, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_folder=?", - dept.getDepartmentId(), group.getGroupId()); - } - - @Override - public void updateDepartment(JobInterface job, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE job SET pk_dept=? 
WHERE pk_job=?", - dept.getDepartmentId(), job.getJobId()); - } - - - public void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus) { - - if (exitStatus == 0) { - - getJdbcTemplate().update( - "UPDATE " + - "job_usage " + - "SET " + - "int_core_time_success = int_core_time_success + ?," + - "int_gpu_time_success = int_gpu_time_success + ?," + - "int_clock_time_success = int_clock_time_success + ?,"+ - "int_frame_success_count = int_frame_success_count + 1 " + - "WHERE " + - "pk_job = ? ", - usage.getCoreTimeSeconds(), - usage.getGpuTimeSeconds(), - usage.getClockTimeSeconds(), - job.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "job_usage " + - "SET " + - "int_clock_time_high = ? " + - "WHERE " + - "pk_job = ? " + - "AND " + - "int_clock_time_high < ?", - usage.getClockTimeSeconds(), - job.getJobId(), - usage.getClockTimeSeconds()); - } - else { - - getJdbcTemplate().update( - "UPDATE " + - "job_usage " + - "SET " + - "int_core_time_fail = int_core_time_fail + ?," + - "int_clock_time_fail = int_clock_time_fail + ?,"+ - "int_frame_fail_count = int_frame_fail_count + 1 " + - "WHERE " + - "pk_job = ? ", - usage.getCoreTimeSeconds(), - usage.getClockTimeSeconds(), - job.getJobId()); - } - } - - public void updateEmail(JobInterface job, String email) { - getJdbcTemplate().update( - "UPDATE job SET str_email=? WHERE pk_job=?", - email, job.getJobId()); - } - - public String getEmail(JobInterface job) { - String jobId = job.getJobId(); - return getJdbcTemplate().queryForObject("SELECT str_email FROM job WHERE pk_job = ?", String.class, jobId); - } + /* + * Sets all frames in the setup state to Waiting. Frames with a depend count > 0 are + * automatically updated to Depend via the update_frame_wait_to_dep trigger. + */ + getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_job=? AND str_state=?", + FrameState.WAITING.toString(), job.getId(), FrameState.SETUP.toString()); + + List> layers = + getJdbcTemplate().queryForList("SELECT pk_layer, str_state, count(1) AS c FROM frame " + + "WHERE pk_job=? GROUP BY pk_layer, str_state", job.getId()); + + for (Map row : layers) { + String layer = (String) row.get("pk_layer"); + FrameState state = FrameState.valueOf((String) row.get("str_state")); + Long count = (Long) row.get("c"); + + if (count == 0 || state == null) { + continue; + } + + switch (state) { + case DEPEND: + jobTotals[0] = jobTotals[0] + count; + getJdbcTemplate().update( + "UPDATE layer_stat SET int_depend_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", + count, count, layer); + break; + case WAITING: + jobTotals[1] = jobTotals[1] + count; + getJdbcTemplate().update( + "UPDATE layer_stat SET int_waiting_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", + count, count, layer); + break; + } + } + + getJdbcTemplate().update( + "UPDATE job_stat SET int_depend_count=?,int_waiting_count=? WHERE pk_job=?", jobTotals[0], + jobTotals[1], job.getJobId()); + + getJdbcTemplate().update("UPDATE job SET int_frame_count=?, int_layer_count=? 
WHERE pk_job=?", + jobTotals[0] + jobTotals[1], layers.size(), job.getJobId()); + + getJdbcTemplate().update( + "UPDATE show_stats SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", + jobTotals[0] + jobTotals[1], job.getShowId()); + + updateState(job, jobState); + } + + private static final String HAS_PENDING_FRAMES = "SELECT " + "int_waiting_count " + "FROM " + + "job," + "job_stat " + "WHERE " + "job.pk_job = job_stat.pk_job " + "AND " + + "job.str_state = 'PENDING' " + "AND " + "job.b_paused = false " + "AND " + + "job.b_auto_book = true " + "AND " + "job.pk_job = ?"; + + @Override + public boolean hasPendingFrames(JobInterface job) { + try { + return getJdbcTemplate().queryForObject(HAS_PENDING_FRAMES, Integer.class, + job.getJobId()) > 0; + } catch (DataAccessException e) { + return false; + } + } + + private static final String IS_JOB_OVER_MIN_CORES = + "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + + "AND " + "job_resource.int_cores > job_resource.int_min_cores"; + + @Override + public boolean isOverMinCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MIN_CORES, Integer.class, + job.getJobId()) > 0; + } + + private static final String IS_JOB_OVER_MAX_CORES = + "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + + "AND " + "job_resource.int_cores + ? > job_resource.int_max_cores"; + + @Override + public boolean isOverMaxCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, Integer.class, job.getJobId(), + 0) > 0; + } + + @Override + public boolean isOverMaxCores(JobInterface job, int coreUnits) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, Integer.class, job.getJobId(), + coreUnits) > 0; + } + + private static final String IS_JOB_AT_MAX_CORES = + "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + + "AND " + "job_resource.int_cores >= job_resource.int_max_cores "; + + @Override + public boolean isAtMaxCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_CORES, Integer.class, job.getJobId()) > 0; + } + + private static final String IS_JOB_OVER_MAX_GPUS = + "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + + "AND " + "job_resource.int_gpus + ? > job_resource.int_max_gpus"; + + @Override + public boolean isOverMaxGpus(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, Integer.class, job.getJobId(), + 0) > 0; + } + + @Override + public boolean isOverMaxGpus(JobInterface job, int gpu) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, Integer.class, job.getJobId(), + gpu) > 0; + } + + private static final String IS_JOB_AT_MAX_GPUS = + "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? 
" + + "AND " + "job_resource.int_gpus >= job_resource.int_max_gpus "; + + @Override + public boolean isAtMaxGpus(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_GPUS, Integer.class, job.getJobId()) > 0; + } + + @Override + public void updateMaxFrameRetries(JobInterface j, int max_retries) { + if (max_retries < 0) { + throw new IllegalArgumentException("max retries must be greater than 0"); + } + + int max_max_retries = getJdbcTemplate().queryForObject( + "SELECT int_value FROM config WHERE str_key=?", Integer.class, "MAX_FRAME_RETRIES"); + + if (max_retries > max_max_retries) { + throw new IllegalArgumentException("max retries must be less than " + max_max_retries); + } + + getJdbcTemplate().update("UPDATE job SET int_max_retries=? WHERE pk_job=?", max_retries, + j.getJobId()); + } + + private static final String GET_FRAME_STATE_TOTALS = + "SELECT " + "job.int_frame_count," + "job_stat.* " + "FROM " + "job," + "job_stat " + "WHERE " + + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job=?"; + + public FrameStateTotals getFrameStateTotals(JobInterface job) { + return getJdbcTemplate().queryForObject(GET_FRAME_STATE_TOTALS, + new RowMapper() { + public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameStateTotals t = new FrameStateTotals(); + t.dead = rs.getInt("int_dead_count"); + t.depend = rs.getInt("int_depend_count"); + t.eaten = rs.getInt("int_eaten_count"); + t.running = rs.getInt("int_running_count"); + t.succeeded = rs.getInt("int_succeeded_count"); + t.waiting = rs.getInt("int_waiting_count"); + t.total = rs.getInt("int_frame_count"); + return t; + } + }, job.getJobId()); + } + + private static final String GET_EXECUTION_SUMMARY = "SELECT " + "job_usage.int_core_time_success," + + "job_usage.int_core_time_fail," + "job_usage.int_gpu_time_success," + + "job_usage.int_gpu_time_fail," + "job_mem.int_max_rss " + "FROM " + "job," + "job_usage, " + + "job_mem " + "WHERE " + "job.pk_job = job_usage.pk_job " + "AND " + + "job.pk_job = job_mem.pk_job " + "AND " + "job.pk_job = ?"; + + public ExecutionSummary getExecutionSummary(JobInterface job) { + return getJdbcTemplate().queryForObject(GET_EXECUTION_SUMMARY, + new RowMapper() { + public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { + ExecutionSummary e = new ExecutionSummary(); + e.coreTimeSuccess = rs.getLong("int_core_time_success"); + e.coreTimeFail = rs.getLong("int_core_time_fail"); + e.coreTime = e.coreTimeSuccess + e.coreTimeFail; + e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); + e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); + e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; + e.highMemoryKb = rs.getLong("int_max_rss"); + + return e; + } + }, job.getJobId()); + } + + private static final String INSERT_JOB_ENV = "INSERT INTO " + "job_env " + "(" + + "pk_job_env, pk_job, str_key, str_value " + ") " + "VALUES (?,?,?,?)"; + + @Override + public void insertEnvironment(JobInterface job, Map env) { + for (Map.Entry e : env.entrySet()) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_JOB_ENV, pk, job.getJobId(), e.getKey(), e.getValue()); + } + } + + @Override + public void insertEnvironment(JobInterface job, String key, String value) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_JOB_ENV, pk, job.getJobId(), key, value); + } + + @Override + public Map getEnvironment(JobInterface job) { + Map result = new HashMap(); + + List> _result = getJdbcTemplate() + .queryForList("SELECT str_key, 
str_value FROM job_env WHERE pk_job=?", job.getJobId()); + + for (Map o : _result) { + result.put((String) o.get("str_key"), (String) o.get("str_value")); + } + + return result; + } + + @Override + public void updateParent(JobInterface job, GroupDetail dest) { + updateParent(job, dest, new Inherit[] {Inherit.All}); + } + + @Override + public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) { + + if (!job.getShowId().equals(dest.getShowId())) { + throw new EntityModificationError("error moving job, " + "cannot move jobs between shows"); + } + + StringBuilder query = new StringBuilder(1024); + query.append("UPDATE job_resource SET "); + List values = new ArrayList(); + + Set inheritSet = new HashSet(inherits.length); + inheritSet.addAll(Arrays.asList(inherits)); + + for (Inherit i : inheritSet) { + switch (i) { + case Priority: + if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { + query.append("int_priority=?,"); + values.add(dest.jobPriority); + } + break; + + case MinCores: + if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { + query.append("int_min_cores=?,"); + values.add(dest.jobMinCores); + } + break; + + case MaxCores: + if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { + query.append("int_max_cores=?,"); + values.add(dest.jobMaxCores); + } + break; + + case MinGpus: + if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_min_gpus=?,"); + values.add(dest.jobMinGpus); + } + break; + + case MaxGpus: + if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_max_gpus=?,"); + values.add(dest.jobMaxGpus); + } + break; + + case All: + if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { + query.append("int_priority=?,"); + values.add(dest.jobPriority); + } + + if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { + query.append("int_min_cores=?,"); + values.add(dest.jobMinCores); + } + + if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { + query.append("int_max_cores=?,"); + values.add(dest.jobMaxCores); + } + + if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_min_gpus=?,"); + values.add(dest.jobMinGpus); + } + + if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_max_gpus=?,"); + values.add(dest.jobMaxGpus); + } + break; + } + } + + getJdbcTemplate().update("UPDATE job SET pk_folder=?, pk_dept=? WHERE pk_job=?", + dest.getGroupId(), dest.getDepartmentId(), job.getJobId()); + + getJdbcTemplate().update("UPDATE job_history SET pk_dept=? WHERE pk_job=?", + dest.getDepartmentId(), job.getJobId()); + + if (values.size() > 0) { + query.deleteCharAt(query.length() - 1); + query.append(" WHERE pk_job=?"); + values.add(job.getJobId()); + getJdbcTemplate().update(query.toString(), values.toArray()); + } + } + + private static final String HAS_PENDING_JOBS = + "SELECT " + "job.pk_job " + "FROM " + "job, " + "job_stat, " + "job_resource " + "WHERE " + + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job = job_resource.pk_job " + "AND " + + "job.str_state = 'PENDING' " + "AND " + "job.b_paused = false " + "AND " + + "job.b_auto_book = true " + "AND " + "job_stat.int_waiting_count != 0 " + "AND " + + "job_resource.int_cores < job_resource.int_max_cores " + "AND " + + "job_resource.int_gpus < job_resource.int_max_gpus " + "AND " + "job.pk_facility = ? 
" + + "LIMIT 1"; + + @Override + public boolean cueHasPendingJobs(FacilityInterface f) { + return getJdbcTemplate().queryForList(HAS_PENDING_JOBS, f.getFacilityId()).size() > 0; + } + + @Override + public void enableAutoBooking(JobInterface job, boolean value) { + getJdbcTemplate().update("UPDATE job SET b_auto_book=? WHERE pk_job=?", value, job.getJobId()); + } + + @Override + public void enableAutoUnBooking(JobInterface job, boolean value) { + getJdbcTemplate().update("UPDATE job SET b_auto_unbook=? WHERE pk_job=?", value, + job.getJobId()); + } + + public static final String MAP_POST_JOB = + "INSERT INTO " + "job_post " + "(pk_job_post, pk_job, pk_post_job) " + "VALUES (?,?,?)"; + + @Override + public void mapPostJob(BuildableJob job) { + getJdbcTemplate().update(MAP_POST_JOB, SqlUtil.genKeyRandom(), job.detail.id, + job.getPostJob().detail.id); + } + + public static final String ACTIVATE_POST_JOB = "UPDATE " + "job " + "SET " + "str_state=? " + + "WHERE " + "pk_job IN (SELECT pk_post_job FROM job_post WHERE pk_job = ?)"; + + @Override + public void activatePostJob(JobInterface job) { + getJdbcTemplate().update(ACTIVATE_POST_JOB, JobState.PENDING.toString(), job.getJobId()); + getJdbcTemplate().update("DELETE FROM job_post WHERE pk_job=?", job.getJobId()); + } + + @Override + public void updateDepartment(GroupInterface group, DepartmentInterface dept) { + getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_folder=?", dept.getDepartmentId(), + group.getGroupId()); + } + + @Override + public void updateDepartment(JobInterface job, DepartmentInterface dept) { + getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_job=?", dept.getDepartmentId(), + job.getJobId()); + } + + public void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus) { + + if (exitStatus == 0) { + + getJdbcTemplate().update( + "UPDATE " + "job_usage " + "SET " + "int_core_time_success = int_core_time_success + ?," + + "int_gpu_time_success = int_gpu_time_success + ?," + + "int_clock_time_success = int_clock_time_success + ?," + + "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + "pk_job = ? ", + usage.getCoreTimeSeconds(), usage.getGpuTimeSeconds(), usage.getClockTimeSeconds(), + job.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "job_usage " + "SET " + "int_clock_time_high = ? " + "WHERE " + "pk_job = ? " + + "AND " + "int_clock_time_high < ?", + usage.getClockTimeSeconds(), job.getJobId(), usage.getClockTimeSeconds()); + } else { + + getJdbcTemplate().update( + "UPDATE " + "job_usage " + "SET " + "int_core_time_fail = int_core_time_fail + ?," + + "int_clock_time_fail = int_clock_time_fail + ?," + + "int_frame_fail_count = int_frame_fail_count + 1 " + "WHERE " + "pk_job = ? ", + usage.getCoreTimeSeconds(), usage.getClockTimeSeconds(), job.getJobId()); + } + } + + public void updateEmail(JobInterface job, String email) { + getJdbcTemplate().update("UPDATE job SET str_email=? 
WHERE pk_job=?", email, job.getJobId()); + } + + public String getEmail(JobInterface job) { + String jobId = job.getJobId(); + return getJdbcTemplate().queryForObject("SELECT str_email FROM job WHERE pk_job = ?", + String.class, jobId); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java index a78475c46..eab61fcdf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -52,854 +48,610 @@ import com.imageworks.spcue.util.CueUtil; import com.imageworks.spcue.util.SqlUtil; - import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; public class LayerDaoJdbc extends JdbcDaoSupport implements LayerDao { - private final long MEM_RESERVED_MIN; - private static final Logger logger = LogManager.getLogger(LayerDaoJdbc.class); - private static final String INSERT_OUTPUT_PATH = - "INSERT INTO " + - "layer_output " + - "( " + - "pk_layer_output,"+ - "pk_layer,"+ - "pk_job,"+ - "str_filespec " + - ") VALUES (?,?,?,?)"; - - @Autowired - public LayerDaoJdbc(Environment env) { - this.MEM_RESERVED_MIN = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class - ); - } - - @Override - public void insertLayerOutput(LayerInterface layer, String filespec) { - getJdbcTemplate().update( - INSERT_OUTPUT_PATH, UUID.randomUUID().toString(), - layer.getLayerId(), layer.getJobId(), - filespec); - } - - private static final String GET_OUTPUT = - "SELECT " + - "str_filespec " + - "FROM " + - "layer_output " + - "WHERE " + - "pk_layer = ? 
" + - "ORDER BY " + - "ser_order"; - - private static final RowMapper OUTPUT_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_filespec"); - } - }; - - @Override - public List getLayerOutputs(LayerInterface layer) { - return getJdbcTemplate().query(GET_OUTPUT, - OUTPUT_MAPPER, layer.getLayerId()); - } - - private static final String IS_LAYER_DISPATCHABLE = - "SELECT " + - "int_waiting_count " + - "FROM " + - "layer_stat " + - "WHERE " + - "pk_layer=?"; - - @Override - public boolean isLayerDispatchable(LayerInterface l ) { - return getJdbcTemplate().queryForObject(IS_LAYER_DISPATCHABLE, - Integer.class, l.getLayerId()) > 0; - } - - private static final String IS_LAYER_COMPLETE = - "SELECT " + - "SUM ( " + - "int_waiting_count + " + - "int_running_count + " + - "int_dead_count + " + - "int_depend_count "+ - ") " + - "FROM " + - "layer_stat " + - "WHERE " + - "pk_layer=?"; - - public boolean isLayerComplete(LayerInterface l) { - if (isLaunching(l)) { - return false; - } - return getJdbcTemplate().queryForObject(IS_LAYER_COMPLETE, - Integer.class, l.getLayerId()) == 0; - } - - private static final String IS_LAUNCHING = - "SELECT " + - "str_state " + - "FROM " + - "job " + - "WHERE " + - "pk_job=?"; - - @Override - public boolean isLaunching(LayerInterface l) { - return getJdbcTemplate().queryForObject( - IS_LAUNCHING, String.class, l.getJobId()).equals( - JobState.STARTUP.toString()); - } - - private static final String IS_THREADABLE = - "SELECT " + - "b_threadable " + - "FROM " + - "layer " + - "WHERE " + - "pk_layer = ?"; - - @Override - public boolean isThreadable(LayerInterface l) { - return getJdbcTemplate().queryForObject(IS_THREADABLE, Boolean.class, l.getLayerId()); - } - - /** - * Query for layers table. 
Where clauses are appended later - */ - public static final String GET_LAYER_DETAIL = - "SELECT " + - "layer.*, " + - "job.pk_show, "+ - "job.pk_facility " + - "FROM " + - "layer," + - "job," + - "show " + - "WHERE " + - "layer.pk_job = job.pk_job " + - "AND " + - "job.pk_show = show.pk_show "; - - private static final String GET_LAYER = - "SELECT " + - "layer.pk_layer,"+ - "layer.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility, " + - "layer.str_name "+ - "FROM " + - "layer," + - "job," + - "show " + - "WHERE " + - "layer.pk_job = job.pk_job " + - "AND " + - "job.pk_show = show.pk_show "; - - /** - * Maps a ResultSet to a LayerDetail - */ - public static final RowMapper LAYER_DETAIL_MAPPER = new RowMapper() { - public LayerDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - LayerDetail layer = new LayerDetail(); - layer.chunkSize = rs.getInt("int_chunk_size"); - layer.command = rs.getString("str_cmd"); - layer.dispatchOrder = rs.getInt("int_dispatch_order"); - layer.id = rs.getString("pk_layer"); - layer.jobId = rs.getString("pk_job"); - layer.showId = rs.getString("pk_show"); - layer.facilityId =rs.getString("pk_facility"); - layer.name = rs.getString("str_name"); - layer.range = rs.getString("str_range"); - layer.minimumCores = rs.getInt("int_cores_min"); - layer.minimumMemory = rs.getLong("int_mem_min"); - layer.minimumGpus = rs.getInt("int_gpus_min"); - layer.minimumGpuMemory = rs.getLong("int_gpu_mem_min"); - layer.type = LayerType.valueOf(rs.getString("str_type")); - layer.tags = Sets.newHashSet( - rs.getString("str_tags").replaceAll(" ", "").split("\\|")); - layer.services.addAll( - Lists.newArrayList(rs.getString("str_services").split(","))); - layer.timeout = rs.getInt("int_timeout"); - layer.timeout_llu = rs.getInt("int_timeout_llu"); - return layer; - } - }; + private final long MEM_RESERVED_MIN; + private static final Logger logger = LogManager.getLogger(LayerDaoJdbc.class); + private static final String INSERT_OUTPUT_PATH = "INSERT INTO " + "layer_output " + "( " + + "pk_layer_output," + "pk_layer," + "pk_job," + "str_filespec " + ") VALUES (?,?,?,?)"; + + @Autowired + public LayerDaoJdbc(Environment env) { + this.MEM_RESERVED_MIN = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + } + + @Override + public void insertLayerOutput(LayerInterface layer, String filespec) { + getJdbcTemplate().update(INSERT_OUTPUT_PATH, UUID.randomUUID().toString(), layer.getLayerId(), + layer.getJobId(), filespec); + } + + private static final String GET_OUTPUT = "SELECT " + "str_filespec " + "FROM " + "layer_output " + + "WHERE " + "pk_layer = ? 
" + "ORDER BY " + "ser_order"; + + private static final RowMapper OUTPUT_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("str_filespec"); + } + }; + + @Override + public List getLayerOutputs(LayerInterface layer) { + return getJdbcTemplate().query(GET_OUTPUT, OUTPUT_MAPPER, layer.getLayerId()); + } + + private static final String IS_LAYER_DISPATCHABLE = + "SELECT " + "int_waiting_count " + "FROM " + "layer_stat " + "WHERE " + "pk_layer=?"; + + @Override + public boolean isLayerDispatchable(LayerInterface l) { + return getJdbcTemplate().queryForObject(IS_LAYER_DISPATCHABLE, Integer.class, + l.getLayerId()) > 0; + } + + private static final String IS_LAYER_COMPLETE = + "SELECT " + "SUM ( " + "int_waiting_count + " + "int_running_count + " + "int_dead_count + " + + "int_depend_count " + ") " + "FROM " + "layer_stat " + "WHERE " + "pk_layer=?"; + + public boolean isLayerComplete(LayerInterface l) { + if (isLaunching(l)) { + return false; + } + return getJdbcTemplate().queryForObject(IS_LAYER_COMPLETE, Integer.class, l.getLayerId()) == 0; + } + + private static final String IS_LAUNCHING = + "SELECT " + "str_state " + "FROM " + "job " + "WHERE " + "pk_job=?"; + + @Override + public boolean isLaunching(LayerInterface l) { + return getJdbcTemplate().queryForObject(IS_LAUNCHING, String.class, l.getJobId()) + .equals(JobState.STARTUP.toString()); + } + + private static final String IS_THREADABLE = + "SELECT " + "b_threadable " + "FROM " + "layer " + "WHERE " + "pk_layer = ?"; + + @Override + public boolean isThreadable(LayerInterface l) { + return getJdbcTemplate().queryForObject(IS_THREADABLE, Boolean.class, l.getLayerId()); + } + + /** + * Query for layers table. Where clauses are appended later + */ + public static final String GET_LAYER_DETAIL = "SELECT " + "layer.*, " + "job.pk_show, " + + "job.pk_facility " + "FROM " + "layer," + "job," + "show " + "WHERE " + + "layer.pk_job = job.pk_job " + "AND " + "job.pk_show = show.pk_show "; + + private static final String GET_LAYER = "SELECT " + "layer.pk_layer," + "layer.pk_job," + + "job.pk_show," + "job.pk_facility, " + "layer.str_name " + "FROM " + "layer," + "job," + + "show " + "WHERE " + "layer.pk_job = job.pk_job " + "AND " + "job.pk_show = show.pk_show "; + + /** + * Maps a ResultSet to a LayerDetail + */ + public static final RowMapper LAYER_DETAIL_MAPPER = new RowMapper() { + public LayerDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + LayerDetail layer = new LayerDetail(); + layer.chunkSize = rs.getInt("int_chunk_size"); + layer.command = rs.getString("str_cmd"); + layer.dispatchOrder = rs.getInt("int_dispatch_order"); + layer.id = rs.getString("pk_layer"); + layer.jobId = rs.getString("pk_job"); + layer.showId = rs.getString("pk_show"); + layer.facilityId = rs.getString("pk_facility"); + layer.name = rs.getString("str_name"); + layer.range = rs.getString("str_range"); + layer.minimumCores = rs.getInt("int_cores_min"); + layer.minimumMemory = rs.getLong("int_mem_min"); + layer.minimumGpus = rs.getInt("int_gpus_min"); + layer.minimumGpuMemory = rs.getLong("int_gpu_mem_min"); + layer.type = LayerType.valueOf(rs.getString("str_type")); + layer.tags = Sets.newHashSet(rs.getString("str_tags").replaceAll(" ", "").split("\\|")); + layer.services.addAll(Lists.newArrayList(rs.getString("str_services").split(","))); + layer.timeout = rs.getInt("int_timeout"); + layer.timeout_llu = rs.getInt("int_timeout_llu"); + return layer; + } + }; + + /** + * Maps a ResultSet to 
a LayerDetail + */ + private static final RowMapper LAYER_MAPPER = new RowMapper() { + public LayerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + LayerEntity layer = new LayerEntity(); + layer.id = rs.getString("pk_layer"); + layer.jobId = rs.getString("pk_job"); + layer.showId = rs.getString("pk_show"); + layer.facilityId = rs.getString("pk_facility"); + layer.name = rs.getString("str_name"); + return layer; + } + }; + + @Override + public LayerDetail getLayerDetail(String id) { + LayerDetail layerDetail = getJdbcTemplate() + .queryForObject(GET_LAYER_DETAIL + " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, id); + layerDetail.limits.addAll(getLimitNames(layerDetail)); + return layerDetail; + } + + @Override + public LayerDetail getLayerDetail(LayerInterface layer) { + LayerDetail layerDetail = getJdbcTemplate().queryForObject( + GET_LAYER_DETAIL + " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, layer.getLayerId()); + layerDetail.limits.addAll(getLimitNames(layerDetail)); + return layerDetail; + } + + @Override + public LayerDetail findLayerDetail(JobInterface job, String name) { + LayerDetail layerDetail = getJdbcTemplate().queryForObject( + GET_LAYER_DETAIL + " AND layer.pk_job=? AND layer.str_name=?", LAYER_DETAIL_MAPPER, + job.getJobId(), name); + layerDetail.limits.addAll(getLimitNames(layerDetail)); + return layerDetail; + } + + @Override + public LayerInterface findLayer(JobInterface job, String name) { + try { + return getJdbcTemplate().queryForObject( + GET_LAYER + " AND layer.pk_job=? AND layer.str_name=?", LAYER_MAPPER, job.getJobId(), + name); + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + throw new EmptyResultDataAccessException( + "The layer " + name + " was not found in " + job.getName() + e, 0); + } + } + + @Override + public List getLayerDetails(JobInterface job) { + List layers = getJdbcTemplate().query(GET_LAYER_DETAIL + " AND layer.pk_job=?", + LAYER_DETAIL_MAPPER, job.getJobId()); + layers.stream().forEach(layerDetail -> layerDetail.limits.addAll(getLimitNames(layerDetail))); + return layers; + } + + @Override + public List getLayers(JobInterface job) { + return getJdbcTemplate().query(GET_LAYER + " AND layer.pk_job=?", LAYER_MAPPER, job.getJobId()); + } + + @Override + public LayerInterface getLayer(String id) { + return getJdbcTemplate().queryForObject(GET_LAYER + " AND layer.pk_layer=?", LAYER_MAPPER, id); + } + + private static final String INSERT_LAYER = "INSERT INTO " + "layer " + "(" + "pk_layer, " + + "pk_job, " + "str_name, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + + "int_dispatch_order, " + "str_tags, " + "str_type," + "int_cores_min, " + "int_cores_max, " + + "b_threadable, " + "int_mem_min, " + "int_gpus_min, " + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_services, " + "int_timeout," + "int_timeout_llu " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertLayerDetail(LayerDetail l) { + l.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_LAYER, l.id, l.jobId, l.name, l.command, l.range, l.chunkSize, + l.dispatchOrder, StringUtils.join(l.tags, " | "), l.type.toString(), l.minimumCores, + l.maximumCores, l.isThreadable, l.minimumMemory, l.minimumGpus, l.maximumGpus, + l.minimumGpuMemory, StringUtils.join(l.services, ","), l.timeout, l.timeout_llu); + } + + @Override + public void updateLayerMinMemory(LayerInterface layer, long val) { + if (val < MEM_RESERVED_MIN) { + val = MEM_RESERVED_MIN; + } + getJdbcTemplate().update("UPDATE layer SET 
int_mem_min=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMinGpuMemory(LayerInterface layer, long kb) { + getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=?", kb, + layer.getLayerId()); + } + + private static final String BALANCE_MEM = "UPDATE " + "layer " + "SET " + "int_mem_min = ? " + + "WHERE " + "pk_layer = ? " + "AND " + "int_mem_min > ? " + "AND " + "b_optimize = true"; + + @Override + public boolean balanceLayerMinMemory(LayerInterface layer, long frameMaxRss) { /** - * Maps a ResultSet to a LayerDetail + * Lowers the memory value on the frame when the maxrss is lower than the memory requirement. */ - private static final RowMapper LAYER_MAPPER = new RowMapper() { - public LayerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LayerEntity layer = new LayerEntity(); - layer.id = rs.getString("pk_layer"); - layer.jobId = rs.getString("pk_job"); - layer.showId = rs.getString("pk_show"); - layer.facilityId = rs.getString("pk_facility"); - layer.name = rs.getString("str_name"); - return layer; - } - }; - - - @Override - public LayerDetail getLayerDetail(String id) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject(GET_LAYER_DETAIL + - " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, id); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerDetail getLayerDetail(LayerInterface layer) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject(GET_LAYER_DETAIL + - " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, layer.getLayerId()); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerDetail findLayerDetail(JobInterface job, String name) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject( - GET_LAYER_DETAIL + " AND layer.pk_job=? AND layer.str_name=?", - LAYER_DETAIL_MAPPER, job.getJobId(), name); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerInterface findLayer(JobInterface job, String name) { - try { - return getJdbcTemplate().queryForObject( - GET_LAYER + " AND layer.pk_job=? 
AND layer.str_name=?", - LAYER_MAPPER, job.getJobId(), name); - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException("The layer " + - name + " was not found in " + job.getName() + e, 0); - } - } - - @Override - public List getLayerDetails(JobInterface job) { - List layers = getJdbcTemplate().query( - GET_LAYER_DETAIL + " AND layer.pk_job=?", - LAYER_DETAIL_MAPPER, job.getJobId()); - layers.stream() - .forEach(layerDetail -> layerDetail.limits.addAll(getLimitNames(layerDetail))); - return layers; - } - - @Override - public List getLayers(JobInterface job) { - return getJdbcTemplate().query( - GET_LAYER + " AND layer.pk_job=?", - LAYER_MAPPER, job.getJobId()); - } - - @Override - public LayerInterface getLayer(String id) { - return getJdbcTemplate().queryForObject( - GET_LAYER + " AND layer.pk_layer=?", - LAYER_MAPPER, id); - } - - private static final String INSERT_LAYER = - "INSERT INTO " + - "layer " + - "("+ - "pk_layer, " + - "pk_job, "+ - "str_name, " + - "str_cmd, " + - "str_range, " + - "int_chunk_size, " + - "int_dispatch_order, " + - "str_tags, " + - "str_type," + - "int_cores_min, "+ - "int_cores_max, "+ - "b_threadable, " + - "int_mem_min, " + - "int_gpus_min, "+ - "int_gpus_max, "+ - "int_gpu_mem_min, " + - "str_services, " + - "int_timeout," + - "int_timeout_llu " + - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertLayerDetail(LayerDetail l) { - l.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER, - l.id, l.jobId, l.name, l.command, - l.range, l.chunkSize, l.dispatchOrder, - StringUtils.join(l.tags," | "), l.type.toString(), - l.minimumCores, l.maximumCores, l.isThreadable, - l.minimumMemory, l.minimumGpus, l.maximumGpus, l.minimumGpuMemory, StringUtils.join(l.services,","), - l.timeout, l.timeout_llu); - } - - @Override - public void updateLayerMinMemory(LayerInterface layer, long val) { - if (val < MEM_RESERVED_MIN) { - val = MEM_RESERVED_MIN; - } - getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_layer=?", - val, layer.getLayerId()); - } - - @Override - public void updateLayerMinGpuMemory(LayerInterface layer, long kb) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=?", - kb, layer.getLayerId()); - } - - private static final String BALANCE_MEM = - "UPDATE " + - "layer " + - "SET " + - "int_mem_min = ? " + - "WHERE " + - "pk_layer = ? " + - "AND " + - "int_mem_min > ? " + - "AND " + - "b_optimize = true"; - - @Override - public boolean balanceLayerMinMemory(LayerInterface layer, long frameMaxRss) { - - /** - * Lowers the memory value on the frame when the maxrss is lower than - * the memory requirement. - */ - long maxrss = getJdbcTemplate().queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getLayerId()); - - if (maxrss < frameMaxRss) { - maxrss = frameMaxRss; - } - if (maxrss < MEM_RESERVED_MIN) { - maxrss = MEM_RESERVED_MIN; - } else { - maxrss = maxrss + CueUtil.MB256; - } - - boolean result = getJdbcTemplate().update(BALANCE_MEM, - maxrss, layer.getLayerId(), maxrss) == 1; - if (result) { - logger.info(layer.getName() + " was balanced to " + maxrss); - } - return result; - } - - @Override - public void increaseLayerMinMemory(LayerInterface layer, long val) { - getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_layer=? 
AND int_mem_min < ?", - val, layer.getLayerId(), val); - } - - @Override - public void increaseLayerMinGpuMemory(LayerInterface layer, long kb) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=? AND int_gpu_mem_min < ?", - kb, layer.getLayerId(), kb); - } - - @Override - public void updateLayerMinCores(LayerInterface layer, int val) { - if (val < Dispatcher.CORE_POINTS_RESERVED_MIN) { - val = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; - } - getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_layer=?", - val, layer.getLayerId()); - } - - @Override - public void updateLayerMaxCores(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_layer=?", - val, layer.getLayerId()); - } - - @Override - public void updateLayerMinGpus(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_layer=?", - val, layer.getLayerId()); - } - - @Override - public void updateLayerMaxGpus(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_gpus_max=? WHERE pk_layer=?", - val, layer.getLayerId()); - } - - private static final String UPDATE_LAYER_MAX_RSS = - "UPDATE " + - "layer_mem " + - "SET " + - "int_max_rss = ? " + - "WHERE " + - "pk_layer = ?"; - - @Override - public void updateLayerMaxRSS(LayerInterface layer, long val, boolean force) { - StringBuilder sb = new StringBuilder(UPDATE_LAYER_MAX_RSS); - Object[] options; - if (!force) { - options = new Object[] { val, layer.getLayerId(), val}; - sb.append (" AND int_max_rss < ?"); - } - else { - options = new Object[] { val, layer.getLayerId() }; - } - getJdbcTemplate().update(sb.toString(), options); - } - - @Override - public void updateLayerTags(LayerInterface layer, Set tags) { - if (tags.size() == 0) { - throw new IllegalArgumentException( - "Layers must have at least one tag."); - } - StringBuilder sb = new StringBuilder(128); - for (String t: tags) { - if (t == null) { continue; } - if (t.length() < 1) { continue; } - sb.append(t + " | "); - } - sb.delete(sb.length()-3, sb.length()); - if (sb.length() == 0) { - throw new IllegalArgumentException( - "Invalid layer tags, cannot contain null tags or " + - "tags of zero length."); - } - getJdbcTemplate().update( - "UPDATE layer SET str_tags=? WHERE pk_layer=?", - sb.toString(), layer.getLayerId()); - } - - @Override - public void appendLayerTags(LayerInterface layer, String val) { - String appendTag = " | " + val; - String matchTag = "%" + val + "%"; - - getJdbcTemplate().update("UPDATE layer SET str_tags = str_tags || ? " + - "WHERE pk_layer=? 
AND str_tags NOT LIKE ?", - appendTag, layer.getLayerId(), matchTag); - } - - public FrameStateTotals getFrameStateTotals(LayerInterface layer) { - return getJdbcTemplate().queryForObject( - "SELECT * FROM layer_stat WHERE pk_layer=?", - new RowMapper() { - public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameStateTotals t = new FrameStateTotals(); - t.dead = rs.getInt("int_dead_count"); - t.depend = rs.getInt("int_depend_count"); - t.eaten = rs.getInt("int_eaten_count"); - t.running = rs.getInt("int_running_count"); - t.succeeded = rs.getInt("int_succeeded_count"); - t.waiting = rs.getInt("int_waiting_count"); - t.total = rs.getInt("int_total_count"); - return t; - } - },layer.getLayerId()); - } - - private static final String GET_EXECUTION_SUMMARY = - "SELECT " + - "layer_usage.int_core_time_success,"+ - "layer_usage.int_core_time_fail," + - "layer_usage.int_gpu_time_success,"+ - "layer_usage.int_gpu_time_fail," + - "layer_usage.int_clock_time_success," + - "layer_mem.int_max_rss " + - "FROM " + - "layer," + - "layer_usage, "+ - "layer_mem " + - "WHERE " + - "layer.pk_layer = layer_usage.pk_layer "+ - "AND " + - "layer.pk_layer = layer_mem.pk_layer " + - "AND " + - "layer.pk_layer = ?"; - - @Override - public ExecutionSummary getExecutionSummary(LayerInterface layer) { - return getJdbcTemplate().queryForObject( - GET_EXECUTION_SUMMARY, - new RowMapper() { - public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { - ExecutionSummary e = new ExecutionSummary(); - e.coreTimeSuccess = rs.getLong("int_core_time_success"); - e.coreTimeFail = rs.getLong("int_core_time_fail"); - e.coreTime = e.coreTimeSuccess + e.coreTimeFail; - e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); - e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); - e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; - e.highMemoryKb = rs.getLong("int_max_rss"); - return e; - } - }, layer.getLayerId()); - } - - private static final String INSERT_LAYER_ENV = - "INSERT INTO " + - "layer_env " + - "(" + - "pk_layer_env, pk_layer, pk_job, str_key, str_value " + - ") " + - "VALUES (?,?,?,?,?)"; - - @Override - public void insertLayerEnvironment(LayerInterface layer, Map env) { - for (Map.Entry e: env.entrySet()) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER_ENV, - pk, layer.getLayerId(), layer.getJobId(), e.getKey(), e.getValue()); - } - } - - @Override - public void insertLayerEnvironment(LayerInterface layer, String key, String value) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER_ENV, - pk, layer.getLayerId(), layer.getJobId(), key, value); - } - - @Override - public Map getLayerEnvironment(LayerInterface layer) { - Map result = new HashMap(); - List> _result = getJdbcTemplate().queryForList( - "SELECT str_key, str_value FROM layer_env WHERE pk_layer=?", layer.getLayerId()); - - for (Map o: _result) { - result.put((String) o.get("str_key"), (String) o.get("str_value")); - } - return result; - } - - private static final String FIND_PAST_MAX_RSS = - "SELECT "+ - "layer_mem.int_max_rss " + - "FROM " + - "layer, " + - "layer_mem, "+ - "layer_stat "+ - "WHERE " + - "layer.pk_layer = layer_stat.pk_layer " + - "AND " + - "layer.pk_layer = layer_mem.pk_layer " + - "AND " + - "layer.pk_job = ? " + - "AND " + - "layer.str_name = ? 
" + - "AND " + - "layer_stat.int_succeeded_count >= ceil(layer_stat.int_total_count * .5) "; - - @Override - public long findPastMaxRSS(JobInterface job, String name) { - try { - long maxRss = getJdbcTemplate().queryForObject(FIND_PAST_MAX_RSS, - Long.class, job.getJobId(), name); - if (maxRss >= MEM_RESERVED_MIN) { - return maxRss; - } - else { - return MEM_RESERVED_MIN; - } - } catch (EmptyResultDataAccessException e) { - // Actually want to return 0 here, which means - // there is no past history. - return 0; - } - } - - @Override - public void updateTags(JobInterface job, String tags, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET str_tags=? WHERE pk_job=? AND str_type=?", - tags, job.getJobId(), type.toString()); - } - - @Override - public void updateMinMemory(JobInterface job, long mem, LayerType type) { - if (mem < MEM_RESERVED_MIN) { - mem = MEM_RESERVED_MIN; - } - getJdbcTemplate().update( - "UPDATE layer SET int_mem_min=? WHERE pk_job=? AND str_type=?", - mem, job.getJobId(), type.toString()); - } - - @Override - public void updateMinGpuMemory(JobInterface job, long kb, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET int_gpu_mem_min=? WHERE pk_job=? AND str_type=?", - kb, job.getJobId(), type.toString()); - } - - @Override - public void updateMinCores(JobInterface job, int cores, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET int_cores_min=? WHERE pk_job=? AND str_type=?", - cores, job.getJobId(), type.toString()); - } - - @Override - public void updateMaxCores(JobInterface job, int cores, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET int_cores_max=? WHERE pk_job=? AND str_type=?", - cores, job.getJobId(), type.toString()); - } - - @Override - public void updateMinGpus(JobInterface job, int gpus, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET int_gpus_min=? WHERE pk_job=? AND str_type=?", - gpus, job.getJobId(), type.toString()); - } - - @Override - public void updateThreadable(LayerInterface layer, boolean threadable) { - getJdbcTemplate().update( - "UPDATE layer SET b_threadable=? WHERE pk_layer=?", - threadable, layer.getLayerId()); - } - - @Override - public void updateTimeout(LayerInterface layer, int timeout){ - getJdbcTemplate().update( - "UPDATE layer SET int_timeout=? WHERE pk_layer=?", - timeout, layer.getLayerId()); - } - - @Override - public void updateTimeoutLLU(LayerInterface layer, int timeout_llu){ - getJdbcTemplate().update( - "UPDATE layer SET int_timeout_llu=? WHERE pk_layer=?", - timeout_llu, layer.getLayerId()); - } - - @Override - public void enableMemoryOptimizer(LayerInterface layer, boolean value) { - getJdbcTemplate().update( - "UPDATE layer SET b_optimize=? WHERE pk_layer=?", - value, layer.getLayerId()); - } - - private static final String IS_OPTIMIZABLE = - "SELECT " + - "COUNT(1) "+ - "FROM " + - "layer, " + - "layer_stat, " + - "layer_usage " + - "WHERE " + - "layer.pk_layer = layer_stat.pk_layer " + - "AND " + - "layer.pk_layer = layer_usage.pk_layer " + - "AND " + - "layer.pk_layer = ? " + - "AND " + - "layer.int_cores_min = 100 " + - "AND " + - "layer.int_gpus_min = 0 " + - "AND " + - "str_tags LIKE '%general%' " + - "AND " + - "str_tags NOT LIKE '%util%' " + - "AND " + - "layer_stat.int_succeeded_count >= ? 
" + - "AND " + - "(layer_usage.int_core_time_success / layer_stat.int_succeeded_count) <= ?"; - - @Override - public boolean isOptimizable(LayerInterface l, int succeeded, float avg) { - if (succeeded < 1) { - throw new IllegalArgumentException("Succeeded frames option " + - "must be greater than zero"); - } - return getJdbcTemplate().queryForObject(IS_OPTIMIZABLE, - Integer.class, l.getLayerId(), succeeded, avg) > 0; - } - - private static final String THREAD_STATS = - "SELECT " + - "avg(interval_to_seconds(ts_stopped - ts_started)) AS avg, " + - "int_cores, " + - "int_gpus " + - "FROM " + - "frame " + - "WHERE " + - "frame.pk_layer = ? " + - "AND " + - "frame.int_checkpoint_count = 0 " + - "AND " + - "int_cores > 0 " + - "AND " + - "int_gpus > 0 " + - "GROUP BY " + - "int_cores, " + - "int_gpus " + - "ORDER BY " + - "int_cores DESC "; - - @Override - public List getThreadStats(LayerInterface layer) { - - return getJdbcTemplate().query(THREAD_STATS, - new RowMapper() { - public ThreadStats mapRow(ResultSet rs, int rowNum) throws SQLException { - ThreadStats s = new ThreadStats(); - s.setThreads(rs.getInt("int_cores") / 100); - s.setAvgFrameTime(rs.getInt("avg")); - return s; - } + long maxrss = getJdbcTemplate().queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getLayerId()); + + if (maxrss < frameMaxRss) { + maxrss = frameMaxRss; + } + if (maxrss < MEM_RESERVED_MIN) { + maxrss = MEM_RESERVED_MIN; + } else { + maxrss = maxrss + CueUtil.MB256; + } + + boolean result = getJdbcTemplate().update(BALANCE_MEM, maxrss, layer.getLayerId(), maxrss) == 1; + if (result) { + logger.info(layer.getName() + " was balanced to " + maxrss); + } + return result; + } + + @Override + public void increaseLayerMinMemory(LayerInterface layer, long val) { + getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_layer=? AND int_mem_min < ?", + val, layer.getLayerId(), val); + } + + @Override + public void increaseLayerMinGpuMemory(LayerInterface layer, long kb) { + getJdbcTemplate().update( + "UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=? AND int_gpu_mem_min < ?", kb, + layer.getLayerId(), kb); + } + + @Override + public void updateLayerMinCores(LayerInterface layer, int val) { + if (val < Dispatcher.CORE_POINTS_RESERVED_MIN) { + val = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + } + getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMaxCores(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMinGpus(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMaxGpus(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_max=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + private static final String UPDATE_LAYER_MAX_RSS = + "UPDATE " + "layer_mem " + "SET " + "int_max_rss = ? 
" + "WHERE " + "pk_layer = ?"; + + @Override + public void updateLayerMaxRSS(LayerInterface layer, long val, boolean force) { + StringBuilder sb = new StringBuilder(UPDATE_LAYER_MAX_RSS); + Object[] options; + if (!force) { + options = new Object[] {val, layer.getLayerId(), val}; + sb.append(" AND int_max_rss < ?"); + } else { + options = new Object[] {val, layer.getLayerId()}; + } + getJdbcTemplate().update(sb.toString(), options); + } + + @Override + public void updateLayerTags(LayerInterface layer, Set tags) { + if (tags.size() == 0) { + throw new IllegalArgumentException("Layers must have at least one tag."); + } + StringBuilder sb = new StringBuilder(128); + for (String t : tags) { + if (t == null) { + continue; + } + if (t.length() < 1) { + continue; + } + sb.append(t + " | "); + } + sb.delete(sb.length() - 3, sb.length()); + if (sb.length() == 0) { + throw new IllegalArgumentException( + "Invalid layer tags, cannot contain null tags or " + "tags of zero length."); + } + getJdbcTemplate().update("UPDATE layer SET str_tags=? WHERE pk_layer=?", sb.toString(), + layer.getLayerId()); + } + + @Override + public void appendLayerTags(LayerInterface layer, String val) { + String appendTag = " | " + val; + String matchTag = "%" + val + "%"; + + getJdbcTemplate().update( + "UPDATE layer SET str_tags = str_tags || ? " + "WHERE pk_layer=? AND str_tags NOT LIKE ?", + appendTag, layer.getLayerId(), matchTag); + } + + public FrameStateTotals getFrameStateTotals(LayerInterface layer) { + return getJdbcTemplate().queryForObject("SELECT * FROM layer_stat WHERE pk_layer=?", + new RowMapper() { + public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameStateTotals t = new FrameStateTotals(); + t.dead = rs.getInt("int_dead_count"); + t.depend = rs.getInt("int_depend_count"); + t.eaten = rs.getInt("int_eaten_count"); + t.running = rs.getInt("int_running_count"); + t.succeeded = rs.getInt("int_succeeded_count"); + t.waiting = rs.getInt("int_waiting_count"); + t.total = rs.getInt("int_total_count"); + return t; + } }, layer.getLayerId()); - } - - @Override - public void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus) { - - if (exitStatus == 0) { - - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_core_time_success = int_core_time_success + ?," + - "int_gpu_time_success = int_gpu_time_success + ?," + - "int_clock_time_success = int_clock_time_success + ?,"+ - "int_frame_success_count = int_frame_success_count + 1 " + - "WHERE " + - "pk_layer = ? ", - usage.getCoreTimeSeconds(), - usage.getGpuTimeSeconds(), - usage.getClockTimeSeconds(), - layer.getLayerId()); - - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_clock_time_high = ? " + - "WHERE " + - "pk_layer = ? " + - "AND " + - "int_clock_time_high < ?", - usage.getClockTimeSeconds(), - layer.getLayerId(), - usage.getClockTimeSeconds()); - - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_clock_time_low = ? " + - "WHERE " + - "pk_layer = ? " + - "AND " + - "(? < int_clock_time_low OR int_clock_time_low = 0)", - usage.getClockTimeSeconds(), - layer.getLayerId(), - usage.getClockTimeSeconds()); - } - else { - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_core_time_fail = int_core_time_fail + ?," + - "int_clock_time_fail = int_clock_time_fail + ?,"+ - "int_frame_fail_count = int_frame_fail_count + 1 " + - "WHERE " + - "pk_layer = ? 
", - usage.getCoreTimeSeconds(), - usage.getClockTimeSeconds(), - layer.getLayerId()); - } - } - - private static final String INSERT_LIMIT = - "INSERT INTO " + - "layer_limit (pk_layer_limit,pk_layer,pk_limit_record)" + - "VALUES (?,?,?)"; - - private static final String GET_LIMITS = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value " + - "FROM " + - "layer_limit," + - "limit_record " + - "WHERE " + - "layer_limit.pk_layer = ? " + - "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; - - private static final String GET_LIMIT_NAMES = - "SELECT " + - "limit_record.str_name " + - "FROM " + - "layer_limit, " + - "limit_record " + - "WHERE " + - "layer_limit.pk_layer = ? " + - "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; - - private static final RowMapper LIMIT_MAPPER = - new RowMapper() { - public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LimitEntity limit = new LimitEntity(); - limit.id = rs.getString("pk_limit_record"); - limit.name = rs.getString("str_name"); - limit.maxValue = rs.getInt("int_max_value"); - return limit; - } - }; - - private static final RowMapper LIMIT_NAME_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_name"); - } - }; - - @Override - public void addLimit(LayerInterface layer, String limitId) { - getJdbcTemplate().update(INSERT_LIMIT, UUID.randomUUID().toString(), layer.getLayerId(), - limitId); - } - - @Override - public void dropLimit(LayerInterface layer, String limitId) { - getJdbcTemplate().update( - "DELETE FROM layer_limit WHERE pk_limit_record = ? AND pk_layer = ?", - limitId, - layer.getLayerId()); - } - - @Override - public List getLimits(LayerInterface layer) { - return getJdbcTemplate().query(GET_LIMITS, - LIMIT_MAPPER, layer.getLayerId()); - } - - @Override - public List getLimitNames(LayerInterface layer) { - return getJdbcTemplate().query(GET_LIMIT_NAMES, - LIMIT_NAME_MAPPER, layer.getLayerId()); - } + } + + private static final String GET_EXECUTION_SUMMARY = + "SELECT " + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail," + + "layer_usage.int_gpu_time_success," + "layer_usage.int_gpu_time_fail," + + "layer_usage.int_clock_time_success," + "layer_mem.int_max_rss " + "FROM " + "layer," + + "layer_usage, " + "layer_mem " + "WHERE " + "layer.pk_layer = layer_usage.pk_layer " + + "AND " + "layer.pk_layer = layer_mem.pk_layer " + "AND " + "layer.pk_layer = ?"; + + @Override + public ExecutionSummary getExecutionSummary(LayerInterface layer) { + return getJdbcTemplate().queryForObject(GET_EXECUTION_SUMMARY, + new RowMapper() { + public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { + ExecutionSummary e = new ExecutionSummary(); + e.coreTimeSuccess = rs.getLong("int_core_time_success"); + e.coreTimeFail = rs.getLong("int_core_time_fail"); + e.coreTime = e.coreTimeSuccess + e.coreTimeFail; + e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); + e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); + e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; + e.highMemoryKb = rs.getLong("int_max_rss"); + return e; + } + }, layer.getLayerId()); + } + + private static final String INSERT_LAYER_ENV = "INSERT INTO " + "layer_env " + "(" + + "pk_layer_env, pk_layer, pk_job, str_key, str_value " + ") " + "VALUES (?,?,?,?,?)"; + + @Override + public void insertLayerEnvironment(LayerInterface layer, Map env) { + for (Map.Entry e : 
env.entrySet()) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_LAYER_ENV, pk, layer.getLayerId(), layer.getJobId(), + e.getKey(), e.getValue()); + } + } + + @Override + public void insertLayerEnvironment(LayerInterface layer, String key, String value) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_LAYER_ENV, pk, layer.getLayerId(), layer.getJobId(), key, + value); + } + + @Override + public Map getLayerEnvironment(LayerInterface layer) { + Map result = new HashMap(); + List> _result = getJdbcTemplate().queryForList( + "SELECT str_key, str_value FROM layer_env WHERE pk_layer=?", layer.getLayerId()); + + for (Map o : _result) { + result.put((String) o.get("str_key"), (String) o.get("str_value")); + } + return result; + } + + private static final String FIND_PAST_MAX_RSS = "SELECT " + "layer_mem.int_max_rss " + "FROM " + + "layer, " + "layer_mem, " + "layer_stat " + "WHERE " + + "layer.pk_layer = layer_stat.pk_layer " + "AND " + "layer.pk_layer = layer_mem.pk_layer " + + "AND " + "layer.pk_job = ? " + "AND " + "layer.str_name = ? " + "AND " + + "layer_stat.int_succeeded_count >= ceil(layer_stat.int_total_count * .5) "; + + @Override + public long findPastMaxRSS(JobInterface job, String name) { + try { + long maxRss = + getJdbcTemplate().queryForObject(FIND_PAST_MAX_RSS, Long.class, job.getJobId(), name); + if (maxRss >= MEM_RESERVED_MIN) { + return maxRss; + } else { + return MEM_RESERVED_MIN; + } + } catch (EmptyResultDataAccessException e) { + // Actually want to return 0 here, which means + // there is no past history. + return 0; + } + } + + @Override + public void updateTags(JobInterface job, String tags, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET str_tags=? WHERE pk_job=? AND str_type=?", tags, + job.getJobId(), type.toString()); + } + + @Override + public void updateMinMemory(JobInterface job, long mem, LayerType type) { + if (mem < MEM_RESERVED_MIN) { + mem = MEM_RESERVED_MIN; + } + getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_job=? AND str_type=?", mem, + job.getJobId(), type.toString()); + } + + @Override + public void updateMinGpuMemory(JobInterface job, long kb, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_job=? AND str_type=?", kb, + job.getJobId(), type.toString()); + } + + @Override + public void updateMinCores(JobInterface job, int cores, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_job=? AND str_type=?", + cores, job.getJobId(), type.toString()); + } + + @Override + public void updateMaxCores(JobInterface job, int cores, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_job=? AND str_type=?", + cores, job.getJobId(), type.toString()); + } + + @Override + public void updateMinGpus(JobInterface job, int gpus, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_job=? AND str_type=?", gpus, + job.getJobId(), type.toString()); + } + + @Override + public void updateThreadable(LayerInterface layer, boolean threadable) { + getJdbcTemplate().update("UPDATE layer SET b_threadable=? WHERE pk_layer=?", threadable, + layer.getLayerId()); + } + + @Override + public void updateTimeout(LayerInterface layer, int timeout) { + getJdbcTemplate().update("UPDATE layer SET int_timeout=? 
WHERE pk_layer=?", timeout, + layer.getLayerId()); + } + + @Override + public void updateTimeoutLLU(LayerInterface layer, int timeout_llu) { + getJdbcTemplate().update("UPDATE layer SET int_timeout_llu=? WHERE pk_layer=?", timeout_llu, + layer.getLayerId()); + } + + @Override + public void enableMemoryOptimizer(LayerInterface layer, boolean value) { + getJdbcTemplate().update("UPDATE layer SET b_optimize=? WHERE pk_layer=?", value, + layer.getLayerId()); + } + + private static final String IS_OPTIMIZABLE = "SELECT " + "COUNT(1) " + "FROM " + "layer, " + + "layer_stat, " + "layer_usage " + "WHERE " + "layer.pk_layer = layer_stat.pk_layer " + + "AND " + "layer.pk_layer = layer_usage.pk_layer " + "AND " + "layer.pk_layer = ? " + "AND " + + "layer.int_cores_min = 100 " + "AND " + "layer.int_gpus_min = 0 " + "AND " + + "str_tags LIKE '%general%' " + "AND " + "str_tags NOT LIKE '%util%' " + "AND " + + "layer_stat.int_succeeded_count >= ? " + "AND " + + "(layer_usage.int_core_time_success / layer_stat.int_succeeded_count) <= ?"; + + @Override + public boolean isOptimizable(LayerInterface l, int succeeded, float avg) { + if (succeeded < 1) { + throw new IllegalArgumentException("Succeeded frames option " + "must be greater than zero"); + } + return getJdbcTemplate().queryForObject(IS_OPTIMIZABLE, Integer.class, l.getLayerId(), + succeeded, avg) > 0; + } + + private static final String THREAD_STATS = + "SELECT " + "avg(interval_to_seconds(ts_stopped - ts_started)) AS avg, " + "int_cores, " + + "int_gpus " + "FROM " + "frame " + "WHERE " + "frame.pk_layer = ? " + "AND " + + "frame.int_checkpoint_count = 0 " + "AND " + "int_cores > 0 " + "AND " + "int_gpus > 0 " + + "GROUP BY " + "int_cores, " + "int_gpus " + "ORDER BY " + "int_cores DESC "; + + @Override + public List getThreadStats(LayerInterface layer) { + + return getJdbcTemplate().query(THREAD_STATS, new RowMapper() { + public ThreadStats mapRow(ResultSet rs, int rowNum) throws SQLException { + ThreadStats s = new ThreadStats(); + s.setThreads(rs.getInt("int_cores") / 100); + s.setAvgFrameTime(rs.getInt("avg")); + return s; + } + }, layer.getLayerId()); + } + + @Override + public void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus) { + + if (exitStatus == 0) { + + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + "int_core_time_success = int_core_time_success + ?," + + "int_gpu_time_success = int_gpu_time_success + ?," + + "int_clock_time_success = int_clock_time_success + ?," + + "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + + "pk_layer = ? ", + usage.getCoreTimeSeconds(), usage.getGpuTimeSeconds(), usage.getClockTimeSeconds(), + layer.getLayerId()); + + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + "int_clock_time_high = ? " + "WHERE " + + "pk_layer = ? " + "AND " + "int_clock_time_high < ?", + usage.getClockTimeSeconds(), layer.getLayerId(), usage.getClockTimeSeconds()); + + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + "int_clock_time_low = ? " + "WHERE " + + "pk_layer = ? " + "AND " + "(? < int_clock_time_low OR int_clock_time_low = 0)", + usage.getClockTimeSeconds(), layer.getLayerId(), usage.getClockTimeSeconds()); + } else { + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + "int_core_time_fail = int_core_time_fail + ?," + + "int_clock_time_fail = int_clock_time_fail + ?," + + "int_frame_fail_count = int_frame_fail_count + 1 " + "WHERE " + "pk_layer = ? 
", + usage.getCoreTimeSeconds(), usage.getClockTimeSeconds(), layer.getLayerId()); + } + } + + private static final String INSERT_LIMIT = + "INSERT INTO " + "layer_limit (pk_layer_limit,pk_layer,pk_limit_record)" + "VALUES (?,?,?)"; + + private static final String GET_LIMITS = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value " + "FROM " + "layer_limit," + + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? " + + "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; + + private static final String GET_LIMIT_NAMES = "SELECT " + "limit_record.str_name " + "FROM " + + "layer_limit, " + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? " + + "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; + + private static final RowMapper LIMIT_MAPPER = new RowMapper() { + public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + LimitEntity limit = new LimitEntity(); + limit.id = rs.getString("pk_limit_record"); + limit.name = rs.getString("str_name"); + limit.maxValue = rs.getInt("int_max_value"); + return limit; + } + }; + + private static final RowMapper LIMIT_NAME_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("str_name"); + } + }; + + @Override + public void addLimit(LayerInterface layer, String limitId) { + getJdbcTemplate().update(INSERT_LIMIT, UUID.randomUUID().toString(), layer.getLayerId(), + limitId); + } + + @Override + public void dropLimit(LayerInterface layer, String limitId) { + getJdbcTemplate().update("DELETE FROM layer_limit WHERE pk_limit_record = ? AND pk_layer = ?", + limitId, layer.getLayerId()); + } + + @Override + public List getLimits(LayerInterface layer) { + return getJdbcTemplate().query(GET_LIMITS, LIMIT_MAPPER, layer.getLayerId()); + } + + @Override + public List getLimitNames(LayerInterface layer) { + return getJdbcTemplate().query(GET_LIMIT_NAMES, LIMIT_NAME_MAPPER, layer.getLayerId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java index f49b99911..6e623c7fa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -32,100 +28,65 @@ public class LimitDaoJdbc extends JdbcDaoSupport implements LimitDao { - public static final RowMapper LIMIT_MAPPER = new RowMapper() { - public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LimitEntity limit = new LimitEntity(); - limit.id = rs.getString("pk_limit_record"); - limit.name = rs.getString("str_name"); - limit.maxValue = rs.getInt("int_max_value"); - limit.currentRunning = rs.getInt("int_current_running"); - return limit; - } - }; - - @Override - public String createLimit(String name, int maxValue) { - String limitId = SqlUtil.genKeyRandom(); - getJdbcTemplate().update( - "INSERT INTO " + - "limit_record " + - "(pk_limit_record,str_name, int_max_value) " + - "VALUES " + - "(?,?,?)", - limitId, name, maxValue); - return limitId; - } - - @Override - public void deleteLimit(LimitInterface limit) { - getJdbcTemplate().update( - "DELETE FROM " + - "limit_record " + - "WHERE " + - "pk_limit_record=?", - limit.getId()); - } - - @Override - public LimitEntity findLimit(String name){ - String findLimitQuery = GET_LIMIT_BASE + - "WHERE " + - "limit_record.str_name=? " + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); + public static final RowMapper LIMIT_MAPPER = new RowMapper() { + public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + LimitEntity limit = new LimitEntity(); + limit.id = rs.getString("pk_limit_record"); + limit.name = rs.getString("str_name"); + limit.maxValue = rs.getInt("int_max_value"); + limit.currentRunning = rs.getInt("int_current_running"); + return limit; } - - @Override - public LimitEntity getLimit(String id) { - String getLimitQuery = GET_LIMIT_BASE + - "WHERE " + - "limit_record.pk_limit_record=? " + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); - } - - @Override - public void setLimitName(LimitInterface limit, String name) { - getJdbcTemplate().update( - "UPDATE " + - "limit_record " + - "SET " + - "str_name = ? " + - "WHERE " + - "pk_limit_record = ?", - name, limit.getId()); - } - - public void setMaxValue(LimitInterface limit, int maxValue) { - getJdbcTemplate().update( - "UPDATE " + - "limit_record " + - "SET " + - "int_max_value = ? 
" + - "WHERE " + - "pk_limit_record = ?", - maxValue, limit.getId()); - } - - private static final String GET_LIMIT_BASE = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value," + - "SUM(layer_stat.int_running_count) AS int_current_running " + - "FROM " + - "limit_record " + - "LEFT JOIN " + - "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN " + - "layer ON layer.pk_layer = layer_limit.pk_layer " + - "LEFT JOIN " + - "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; + }; + + @Override + public String createLimit(String name, int maxValue) { + String limitId = SqlUtil.genKeyRandom(); + getJdbcTemplate().update("INSERT INTO " + "limit_record " + + "(pk_limit_record,str_name, int_max_value) " + "VALUES " + "(?,?,?)", limitId, name, + maxValue); + return limitId; + } + + @Override + public void deleteLimit(LimitInterface limit) { + getJdbcTemplate().update("DELETE FROM " + "limit_record " + "WHERE " + "pk_limit_record=?", + limit.getId()); + } + + @Override + public LimitEntity findLimit(String name) { + String findLimitQuery = GET_LIMIT_BASE + "WHERE " + "limit_record.str_name=? " + "GROUP BY " + + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); + } + + @Override + public LimitEntity getLimit(String id) { + String getLimitQuery = GET_LIMIT_BASE + "WHERE " + "limit_record.pk_limit_record=? " + + "GROUP BY " + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); + } + + @Override + public void setLimitName(LimitInterface limit, String name) { + getJdbcTemplate().update( + "UPDATE " + "limit_record " + "SET " + "str_name = ? " + "WHERE " + "pk_limit_record = ?", + name, limit.getId()); + } + + public void setMaxValue(LimitInterface limit, int maxValue) { + getJdbcTemplate().update("UPDATE " + "limit_record " + "SET " + "int_max_value = ? " + "WHERE " + + "pk_limit_record = ?", maxValue, limit.getId()); + } + + private static final String GET_LIMIT_BASE = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value," + + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + "limit_record " + + "LEFT JOIN " + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " + + "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java index 68e3a3566..84d7943d6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import org.springframework.jdbc.core.support.JdbcDaoSupport; @@ -27,63 +23,35 @@ public class MaintenanceDaoJdbc extends JdbcDaoSupport implements MaintenanceDao { - private static final String HOST_DOWN_INTERVAL = "interval '300' second"; + private static final String HOST_DOWN_INTERVAL = "interval '300' second"; - private static final String UPDATE_HOSTS_DOWN = - "UPDATE " + - "host_stat " + - "SET " + - "str_state = ? " + - "WHERE " + - "str_state = 'UP' " + - "AND " + - "current_timestamp - ts_ping > " + HOST_DOWN_INTERVAL; + private static final String UPDATE_HOSTS_DOWN = + "UPDATE " + "host_stat " + "SET " + "str_state = ? " + "WHERE " + "str_state = 'UP' " + "AND " + + "current_timestamp - ts_ping > " + HOST_DOWN_INTERVAL; - public int setUpHostsToDown() { - return getJdbcTemplate().update(UPDATE_HOSTS_DOWN, - HardwareState.DOWN.toString()); - } + public int setUpHostsToDown() { + return getJdbcTemplate().update(UPDATE_HOSTS_DOWN, HardwareState.DOWN.toString()); + } - public static final String LOCK_TASK = - "UPDATE " + - "task_lock " + - "SET " + - "int_lock = ?, " + - "ts_lastrun = current_timestamp " + - "WHERE " + - "str_name = ? "+ - "AND " + - "(int_lock = ? OR ? - int_lock > int_timeout)"; + public static final String LOCK_TASK = + "UPDATE " + "task_lock " + "SET " + "int_lock = ?, " + "ts_lastrun = current_timestamp " + + "WHERE " + "str_name = ? " + "AND " + "(int_lock = ? OR ? - int_lock > int_timeout)"; - public boolean lockTask(MaintenanceTask task) { - long now = System.currentTimeMillis(); - return getJdbcTemplate().update(LOCK_TASK, - now, task.toString(), 0, now) == 1; - } + public boolean lockTask(MaintenanceTask task) { + long now = System.currentTimeMillis(); + return getJdbcTemplate().update(LOCK_TASK, now, task.toString(), 0, now) == 1; + } - public static final String LOCK_TASK_MIN = - "UPDATE " + - "task_lock " + - "SET " + - "int_lock = ?, " + - "ts_lastrun = current_timestamp " + - "WHERE " + - "str_name= ? "+ - "AND " + - "int_lock = ? " + - "AND " + - "interval_to_seconds(current_timestamp - ts_lastrun) > ? "; + public static final String LOCK_TASK_MIN = "UPDATE " + "task_lock " + "SET " + "int_lock = ?, " + + "ts_lastrun = current_timestamp " + "WHERE " + "str_name= ? " + "AND " + "int_lock = ? " + + "AND " + "interval_to_seconds(current_timestamp - ts_lastrun) > ? 
"; - public boolean lockTask(MaintenanceTask task, int minutes) { - long now = System.currentTimeMillis(); - return getJdbcTemplate().update(LOCK_TASK_MIN, - now, task.toString(), 0, minutes * 60) == 1; - } + public boolean lockTask(MaintenanceTask task, int minutes) { + long now = System.currentTimeMillis(); + return getJdbcTemplate().update(LOCK_TASK_MIN, now, task.toString(), 0, minutes * 60) == 1; + } - - public void unlockTask(MaintenanceTask task) { - getJdbcTemplate().update( - "UPDATE task_lock SET int_lock = 0 WHERE str_name=?", task.toString()); - } + public void unlockTask(MaintenanceTask task) { + getJdbcTemplate().update("UPDATE task_lock SET int_lock = 0 WHERE str_name=?", task.toString()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java index 4afc2804c..0f5f95184 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -36,74 +32,56 @@ public class MatcherDaoJdbc extends JdbcDaoSupport implements MatcherDao { - private static final String INSERT_MATCHER = - "INSERT INTO " + - "matcher " + - "( " + - "pk_matcher,pk_filter,str_subject,str_match,str_value"+ - ") VALUES (?,?,?,?,?)"; - - public void insertMatcher(MatcherEntity matcher) { - matcher.id = SqlUtil.genKeyRandom(); - - getJdbcTemplate().update(INSERT_MATCHER, - matcher.id, matcher.getFilterId(), matcher.subject.toString(), - matcher.type.toString(), matcher.value); - } - - public void deleteMatcher(MatcherInterface matcher) { - getJdbcTemplate().update( - "DELETE FROM matcher WHERE pk_matcher=?", - matcher.getMatcherId()); - } - - private static final String GET_MATCHER = - "SELECT " + - "matcher.*, " + - "filter.pk_show "+ - "FROM " + - "matcher, " + - "filter " + - "WHERE " + - "matcher.pk_filter = filter.pk_filter"; - - public MatcherEntity getMatcher(String id) { - return getJdbcTemplate().queryForObject( - GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_DETAIL_MAPPER, id); - } - - public MatcherEntity getMatcher(MatcherInterface matcher) { - return getJdbcTemplate().queryForObject( - GET_MATCHER + " AND matcher.pk_matcher=?", MATCHER_DETAIL_MAPPER, - matcher.getMatcherId()); - } - - public List getMatchers(FilterInterface filter) { - return getJdbcTemplate().query( - GET_MATCHER + " AND filter.pk_filter=? ORDER BY ts_created ASC", - MATCHER_DETAIL_MAPPER, filter.getFilterId()); - } - - - public void updateMatcher(MatcherEntity matcher) { - getJdbcTemplate().update( - "UPDATE matcher SET str_subject=?,str_match=?,str_value=? WHERE pk_matcher=?", - matcher.subject.toString(), matcher.type.toString(), matcher.value, matcher.getMatcherId()); - } - - public static final RowMapper MATCHER_DETAIL_MAPPER = new RowMapper() { + private static final String INSERT_MATCHER = "INSERT INTO " + "matcher " + "( " + + "pk_matcher,pk_filter,str_subject,str_match,str_value" + ") VALUES (?,?,?,?,?)"; + + public void insertMatcher(MatcherEntity matcher) { + matcher.id = SqlUtil.genKeyRandom(); + + getJdbcTemplate().update(INSERT_MATCHER, matcher.id, matcher.getFilterId(), + matcher.subject.toString(), matcher.type.toString(), matcher.value); + } + + public void deleteMatcher(MatcherInterface matcher) { + getJdbcTemplate().update("DELETE FROM matcher WHERE pk_matcher=?", matcher.getMatcherId()); + } + + private static final String GET_MATCHER = "SELECT " + "matcher.*, " + "filter.pk_show " + "FROM " + + "matcher, " + "filter " + "WHERE " + "matcher.pk_filter = filter.pk_filter"; + + public MatcherEntity getMatcher(String id) { + return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", + MATCHER_DETAIL_MAPPER, id); + } + + public MatcherEntity getMatcher(MatcherInterface matcher) { + return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", + MATCHER_DETAIL_MAPPER, matcher.getMatcherId()); + } + + public List getMatchers(FilterInterface filter) { + return getJdbcTemplate().query(GET_MATCHER + " AND filter.pk_filter=? ORDER BY ts_created ASC", + MATCHER_DETAIL_MAPPER, filter.getFilterId()); + } + + public void updateMatcher(MatcherEntity matcher) { + getJdbcTemplate().update( + "UPDATE matcher SET str_subject=?,str_match=?,str_value=? 
WHERE pk_matcher=?", + matcher.subject.toString(), matcher.type.toString(), matcher.value, matcher.getMatcherId()); + } + + public static final RowMapper MATCHER_DETAIL_MAPPER = + new RowMapper() { public MatcherEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - MatcherEntity matcher = new MatcherEntity(); - matcher.id = rs.getString("pk_matcher"); - matcher.showId = rs.getString("pk_show"); - matcher.filterId = rs.getString("pk_filter"); - matcher.name = null; - matcher.subject = MatchSubject.valueOf(rs.getString("str_subject")); - matcher.type = MatchType.valueOf(rs.getString("str_match")); - matcher.value = rs.getString("str_value"); - return matcher; + MatcherEntity matcher = new MatcherEntity(); + matcher.id = rs.getString("pk_matcher"); + matcher.showId = rs.getString("pk_show"); + matcher.filterId = rs.getString("pk_filter"); + matcher.name = null; + matcher.subject = MatchSubject.valueOf(rs.getString("str_subject")); + matcher.type = MatchType.valueOf(rs.getString("str_match")); + matcher.value = rs.getString("str_value"); + return matcher; } - }; + }; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java index fdd147ca1..6d2a0853a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -47,513 +43,362 @@ public class NestedWhiteboardDaoJdbc extends JdbcDaoSupport implements NestedWhiteboardDao { - private class CachedJobWhiteboardMapper { - public final long time; - public NestedJobWhiteboardMapper mapper; + private class CachedJobWhiteboardMapper { + public final long time; + public NestedJobWhiteboardMapper mapper; - public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { - this.mapper = result; - this.time = System.currentTimeMillis(); - } + public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { + this.mapper = result; + this.time = System.currentTimeMillis(); + } + } + + private static final int CACHE_TIMEOUT = 5000; + private final ConcurrentHashMap jobCache = + new ConcurrentHashMap(20); + + public static final String GET_NESTED_GROUPS = "SELECT " + "show.pk_show, " + + "show.str_name AS str_show, " + "facility.str_name AS facility_name, " + + "dept.str_name AS dept_name, " + "folder.pk_folder, " + "folder.pk_parent_folder, " + + "folder.str_name AS group_name, " + "folder.int_job_priority as int_def_job_priority, " + + "folder.int_job_min_cores as int_def_job_min_cores, " + + "folder.int_job_max_cores as int_def_job_max_cores, " + + "folder.int_job_min_gpus as int_def_job_min_gpus, " + + "folder.int_job_max_gpus as int_def_job_max_gpus, " + + "folder_resource.int_min_cores AS folder_min_cores, " + + "folder_resource.int_max_cores AS folder_max_cores, " + + "folder_resource.int_min_gpus AS folder_min_gpus, " + + "folder_resource.int_max_gpus AS folder_max_gpus, " + "folder_level.int_level, " + + "job.pk_job, " + "job.str_name, " + "job.str_shot, " + "job.str_user, " + "job.str_state, " + + "job.str_log_dir, " + "job.int_uid, " + "job_resource.int_priority, " + "job.ts_started, " + + "job.ts_stopped, " + "job.ts_updated, " + "job.b_paused, " + "job.b_autoeat, " + + "job.b_comment, " + "COALESCE(str_os, '') AS str_os, " + "job.int_frame_count, " + + "job.int_layer_count, " + "job_stat.int_waiting_count, " + "job_stat.int_running_count, " + + "job_stat.int_dead_count, " + "job_stat.int_eaten_count," + "job_stat.int_depend_count, " + + "job_stat.int_succeeded_count, " + "job_usage.int_core_time_success, " + + "job_usage.int_core_time_fail, " + "job_usage.int_gpu_time_success, " + + "job_usage.int_gpu_time_fail, " + "job_usage.int_frame_success_count, " + + "job_usage.int_frame_fail_count, " + "job_usage.int_clock_time_high, " + + "job_usage.int_clock_time_success, " + + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores, " + + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus, " + + "job_resource.int_min_cores, " + "job_resource.int_min_gpus, " + + "job_resource.int_max_cores, " + "job_resource.int_max_gpus, " + "job_mem.int_max_rss " + + "FROM " + "show, " + "dept, " + "folder_level, " + "folder_resource, " + "folder " + + "LEFT JOIN " + "job " + "ON " + + " (folder.pk_folder = job.pk_folder AND job.str_state='PENDING') " + "LEFT JOIN " + + "facility " + "ON " + "(job.pk_facility = facility.pk_facility) " + "LEFT JOIN " + + "job_stat " + "ON " + "(job.pk_job = job_stat.pk_job) " + "LEFT JOIN " + "job_resource " + + "ON " + "(job.pk_job = job_resource.pk_job) " + "LEFT JOIN " + "job_usage " + "ON " + + "(job.pk_job = job_usage.pk_job) " + "LEFT JOIN " + "job_mem " + "ON " + + "(job.pk_job = job_mem.pk_job) " + "WHERE " + "show.pk_show = folder.pk_show " + "AND " + + "folder.pk_folder = folder_level.pk_folder " + "AND " + + 
"folder.pk_folder = folder_resource.pk_folder " + "AND " + "folder.pk_dept = dept.pk_dept "; + + private class ChildrenEntry { + String key; + int level; + List children; + String name; + + public ChildrenEntry(String key, int level, String name) { + this.key = key; + this.level = level; + this.children = new ArrayList<>(); + this.name = name; } - private static final int CACHE_TIMEOUT = 5000; - private final ConcurrentHashMap jobCache = - new ConcurrentHashMap(20); - - public static final String GET_NESTED_GROUPS = - "SELECT " + - "show.pk_show, " + - "show.str_name AS str_show, " + - "facility.str_name AS facility_name, " + - "dept.str_name AS dept_name, " + - "folder.pk_folder, " + - "folder.pk_parent_folder, " + - "folder.str_name AS group_name, " + - "folder.int_job_priority as int_def_job_priority, " + - "folder.int_job_min_cores as int_def_job_min_cores, " + - "folder.int_job_max_cores as int_def_job_max_cores, " + - "folder.int_job_min_gpus as int_def_job_min_gpus, " + - "folder.int_job_max_gpus as int_def_job_max_gpus, " + - "folder_resource.int_min_cores AS folder_min_cores, " + - "folder_resource.int_max_cores AS folder_max_cores, " + - "folder_resource.int_min_gpus AS folder_min_gpus, " + - "folder_resource.int_max_gpus AS folder_max_gpus, " + - "folder_level.int_level, " + - "job.pk_job, " + - "job.str_name, " + - "job.str_shot, " + - "job.str_user, " + - "job.str_state, " + - "job.str_log_dir, " + - "job.int_uid, " + - "job_resource.int_priority, " + - "job.ts_started, " + - "job.ts_stopped, " + - "job.ts_updated, " + - "job.b_paused, " + - "job.b_autoeat, " + - "job.b_comment, " + - "COALESCE(str_os, '') AS str_os, " + - "job.int_frame_count, " + - "job.int_layer_count, " + - "job_stat.int_waiting_count, " + - "job_stat.int_running_count, " + - "job_stat.int_dead_count, " + - "job_stat.int_eaten_count," + - "job_stat.int_depend_count, " + - "job_stat.int_succeeded_count, " + - "job_usage.int_core_time_success, " + - "job_usage.int_core_time_fail, " + - "job_usage.int_gpu_time_success, " + - "job_usage.int_gpu_time_fail, " + - "job_usage.int_frame_success_count, " + - "job_usage.int_frame_fail_count, " + - "job_usage.int_clock_time_high, " + - "job_usage.int_clock_time_success, " + - "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores, " + - "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus, " + - "job_resource.int_min_cores, " + - "job_resource.int_min_gpus, " + - "job_resource.int_max_cores, " + - "job_resource.int_max_gpus, " + - "job_mem.int_max_rss " + - "FROM " + - "show, " + - "dept, " + - "folder_level, " + - "folder_resource, " + - "folder " + - "LEFT JOIN " + - "job " + - "ON " + - " (folder.pk_folder = job.pk_folder AND job.str_state='PENDING') " + - "LEFT JOIN " + - "facility " + - "ON " + - "(job.pk_facility = facility.pk_facility) " + - "LEFT JOIN " + - "job_stat " + - "ON " + - "(job.pk_job = job_stat.pk_job) " + - "LEFT JOIN " + - "job_resource " + - "ON " + - "(job.pk_job = job_resource.pk_job) " + - "LEFT JOIN " + - "job_usage " + - "ON " + - "(job.pk_job = job_usage.pk_job) " + - "LEFT JOIN " + - "job_mem " + - "ON " + - "(job.pk_job = job_mem.pk_job) " + - "WHERE " + - "show.pk_show = folder.pk_show " + - "AND " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "folder.pk_dept = dept.pk_dept "; - - private class ChildrenEntry { - String key; - int level; - List children; - String name; - - public ChildrenEntry(String key, int level, String name) 
{ - this.key = key; - this.level = level; - this.children = new ArrayList<>(); - this.name = name; - } - - public List getChildren() { - return children; - } - - public void addChild(String child) { - children.add(child); - } - - public String getKey() { - return key; - } - - public String getName() { - return name; - } - - public int compareTo(ChildrenEntry o) { - // Invert order - return Integer.compare(o.level, this.level); - } + public List getChildren() { + return children; + } - @Override - public String toString() { - StringBuilder out = new StringBuilder(); - String spacing = " ".repeat(Math.max(0, this.level + 1)); - out.append(spacing); - out.append(key + "(c " + name + ")"); - for (String id : children) { - out.append("\n " + spacing + id.substring(0, 4)); - } - return out.toString(); - } + public void addChild(String child) { + children.add(child); } - class NestedJobWhiteboardMapper implements RowMapper { - public Map groups = new HashMap(50); - public Map childrenMap = new HashMap(); - public String rootGroupID; - - @Override - public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { - String groupId = rs.getString("pk_folder"); - NestedGroup group; - if (!groups.containsKey(groupId)) { - group = NestedGroup.newBuilder() - .setId(rs.getString("pk_folder")) - .setName(rs.getString("group_name")) - .setDefaultJobPriority(rs.getInt("int_def_job_priority")) - .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_min_cores"))) - .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_max_cores"))) - .setDefaultJobMinGpus(rs.getInt("int_def_job_min_gpus")) - .setDefaultJobMaxGpus(rs.getInt("int_def_job_max_gpus")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("folder_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("folder_min_cores"))) - .setMaxGpus(rs.getInt("folder_max_gpus")) - .setMinGpus(rs.getInt("folder_min_gpus")) - .setLevel(rs.getInt("int_level")) - .setDepartment(rs.getString("dept_name")) - .build(); - - String parentGroupId = rs.getString("pk_parent_folder"); - if (parentGroupId != null) { - ChildrenEntry childrenEntry = childrenMap.get(parentGroupId); - if (childrenEntry == null) { - childrenEntry = new ChildrenEntry( - parentGroupId, group.getLevel() - 1, rs.getString("group_name")); - childrenEntry.addChild(groupId); - childrenMap.put(parentGroupId, childrenEntry); - } - else { - childrenEntry.addChild(groupId); - } - } - else { - rootGroupID = rs.getString("pk_folder"); - } - groups.put(groupId, group); - } - else { - group = groups.get(groupId); - } - if (rs.getString("pk_job") != null) { - GroupStats oldStats = group.getStats(); - JobStats jobStats = WhiteboardDaoJdbc.mapJobStats(rs); - GroupStats groupStats = GroupStats.newBuilder() - .setDeadFrames(oldStats.getDeadFrames() + jobStats.getDeadFrames()) - .setRunningFrames(oldStats.getRunningFrames() + jobStats.getRunningFrames()) - .setWaitingFrames(oldStats.getWaitingFrames() + jobStats.getWaitingFrames()) - .setDependFrames(oldStats.getDependFrames() + jobStats.getDependFrames()) - .setReservedCores(oldStats.getReservedCores() + jobStats.getReservedCores()) - .setPendingJobs(oldStats.getPendingJobs() + 1).build(); - - group = group.toBuilder() - .setStats(groupStats) - .addJobs(rs.getString("pk_job")) - .build(); - groups.put(groupId, group); - } - return group; - } + public String getKey() { + return key; } - private NestedJobWhiteboardMapper updateConnections(NestedJobWhiteboardMapper mapper) { - ArrayList orderedChildren = new 
ArrayList<>(mapper.childrenMap.values()); - orderedChildren.sort(ChildrenEntry::compareTo); - - for (ChildrenEntry entry : orderedChildren) { - NestedGroup group = mapper.groups.get(entry.getKey()); - NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); - for (String childId : entry.getChildren()) { - NestedGroup child = mapper.groups.get(childId); - child = child.toBuilder().setParent(group).build(); - childrenBuilder.addNestedGroups(child); - mapper.groups.put(childId, child); - } - group = group.toBuilder() - .setGroups(childrenBuilder.build()) - .build(); - mapper.groups.put(entry.getKey(), group); - } - return mapper; + public String getName() { + return name; } - public NestedGroup getJobWhiteboard(ShowInterface show) { + public int compareTo(ChildrenEntry o) { + // Invert order + return Integer.compare(o.level, this.level); + } - CachedJobWhiteboardMapper cachedMapper = jobCache.get(show.getShowId()); - if (cachedMapper != null) { - if (System.currentTimeMillis() - cachedMapper.time < CACHE_TIMEOUT) { - return cachedMapper.mapper.groups.get(cachedMapper.mapper.rootGroupID); - } + @Override + public String toString() { + StringBuilder out = new StringBuilder(); + String spacing = " ".repeat(Math.max(0, this.level + 1)); + out.append(spacing); + out.append(key + "(c " + name + ")"); + for (String id : children) { + out.append("\n " + spacing + id.substring(0, 4)); + } + return out.toString(); + } + } + + class NestedJobWhiteboardMapper implements RowMapper { + public Map groups = new HashMap(50); + public Map childrenMap = new HashMap(); + public String rootGroupID; + + @Override + public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { + String groupId = rs.getString("pk_folder"); + NestedGroup group; + if (!groups.containsKey(groupId)) { + group = NestedGroup.newBuilder().setId(rs.getString("pk_folder")) + .setName(rs.getString("group_name")) + .setDefaultJobPriority(rs.getInt("int_def_job_priority")) + .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_min_cores"))) + .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_max_cores"))) + .setDefaultJobMinGpus(rs.getInt("int_def_job_min_gpus")) + .setDefaultJobMaxGpus(rs.getInt("int_def_job_max_gpus")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("folder_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("folder_min_cores"))) + .setMaxGpus(rs.getInt("folder_max_gpus")).setMinGpus(rs.getInt("folder_min_gpus")) + .setLevel(rs.getInt("int_level")).setDepartment(rs.getString("dept_name")).build(); + + String parentGroupId = rs.getString("pk_parent_folder"); + if (parentGroupId != null) { + ChildrenEntry childrenEntry = childrenMap.get(parentGroupId); + if (childrenEntry == null) { + childrenEntry = + new ChildrenEntry(parentGroupId, group.getLevel() - 1, rs.getString("group_name")); + childrenEntry.addChild(groupId); + childrenMap.put(parentGroupId, childrenEntry); + } else { + childrenEntry.addChild(groupId); + } + } else { + rootGroupID = rs.getString("pk_folder"); } + groups.put(groupId, group); + } else { + group = groups.get(groupId); + } + if (rs.getString("pk_job") != null) { + GroupStats oldStats = group.getStats(); + JobStats jobStats = WhiteboardDaoJdbc.mapJobStats(rs); + GroupStats groupStats = GroupStats.newBuilder() + .setDeadFrames(oldStats.getDeadFrames() + jobStats.getDeadFrames()) + .setRunningFrames(oldStats.getRunningFrames() + jobStats.getRunningFrames()) + .setWaitingFrames(oldStats.getWaitingFrames() + 
jobStats.getWaitingFrames()) + .setDependFrames(oldStats.getDependFrames() + jobStats.getDependFrames()) + .setReservedCores(oldStats.getReservedCores() + jobStats.getReservedCores()) + .setPendingJobs(oldStats.getPendingJobs() + 1).build(); + + group = group.toBuilder().setStats(groupStats).addJobs(rs.getString("pk_job")).build(); + groups.put(groupId, group); + } + return group; + } + } + + private NestedJobWhiteboardMapper updateConnections(NestedJobWhiteboardMapper mapper) { + ArrayList orderedChildren = new ArrayList<>(mapper.childrenMap.values()); + orderedChildren.sort(ChildrenEntry::compareTo); + + for (ChildrenEntry entry : orderedChildren) { + NestedGroup group = mapper.groups.get(entry.getKey()); + NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); + for (String childId : entry.getChildren()) { + NestedGroup child = mapper.groups.get(childId); + child = child.toBuilder().setParent(group).build(); + childrenBuilder.addNestedGroups(child); + mapper.groups.put(childId, child); + } + group = group.toBuilder().setGroups(childrenBuilder.build()).build(); + mapper.groups.put(entry.getKey(), group); + } + return mapper; + } - NestedJobWhiteboardMapper mapper = new NestedJobWhiteboardMapper(); - getJdbcTemplate().query( - GET_NESTED_GROUPS + " AND show.pk_show=? ORDER BY folder_level.int_level ASC", - mapper, show.getShowId()); + public NestedGroup getJobWhiteboard(ShowInterface show) { - mapper = updateConnections(mapper); - jobCache.put(show.getShowId(), new CachedJobWhiteboardMapper(mapper)); - return mapper.groups.get(mapper.rootGroupID); + CachedJobWhiteboardMapper cachedMapper = jobCache.get(show.getShowId()); + if (cachedMapper != null) { + if (System.currentTimeMillis() - cachedMapper.time < CACHE_TIMEOUT) { + return cachedMapper.mapper.groups.get(cachedMapper.mapper.rootGroupID); + } } + NestedJobWhiteboardMapper mapper = new NestedJobWhiteboardMapper(); + getJdbcTemplate().query( + GET_NESTED_GROUPS + " AND show.pk_show=? 
ORDER BY folder_level.int_level ASC", mapper, + show.getShowId()); + + mapper = updateConnections(mapper); + jobCache.put(show.getShowId(), new CachedJobWhiteboardMapper(mapper)); + return mapper.groups.get(mapper.rootGroupID); + } + + private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLException { + + NestedJob.Builder jobBuilder = NestedJob.newBuilder().setId(rs.getString("pk_job")) + .setLogDir(rs.getString("str_log_dir")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_cores")).setMinGpus(rs.getInt("int_min_cores")) + .setName(rs.getString("str_name")).setPriority(rs.getInt("int_priority")) + .setShot(rs.getString("str_shot")).setShow(rs.getString("str_show")) + .setOs(rs.getString("str_os")).setFacility(rs.getString("facility_name")) + .setGroup(rs.getString("group_name")).setState(JobState.valueOf(rs.getString("str_state"))) + .setUser(rs.getString("str_user")).setIsPaused(rs.getBoolean("b_paused")) + .setHasComment(rs.getBoolean("b_comment")).setAutoEat(rs.getBoolean("b_autoeat")) + .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) + .setStats(WhiteboardDaoJdbc.mapJobStats(rs)); + + int uid = rs.getInt("int_uid"); + if (!rs.wasNull()) { + jobBuilder.setUid(uid); + } - private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLException { - - NestedJob.Builder jobBuilder = NestedJob.newBuilder() - .setId(rs.getString("pk_job")) - .setLogDir(rs.getString("str_log_dir")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setMaxGpus(rs.getInt("int_max_cores")) - .setMinGpus(rs.getInt("int_min_cores")) - .setName(rs.getString("str_name")) - .setPriority(rs.getInt("int_priority")) - .setShot(rs.getString("str_shot")) - .setShow(rs.getString("str_show")) - .setOs(rs.getString("str_os")) - .setFacility(rs.getString("facility_name")) - .setGroup(rs.getString("group_name")) - .setState(JobState.valueOf(rs.getString("str_state"))) - .setUser(rs.getString("str_user")) - .setIsPaused(rs.getBoolean("b_paused")) - .setHasComment(rs.getBoolean("b_comment")) - .setAutoEat(rs.getBoolean("b_autoeat")) - .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) - .setStats(WhiteboardDaoJdbc.mapJobStats(rs)); - - int uid = rs.getInt("int_uid"); - if (!rs.wasNull()) { - jobBuilder.setUid(uid); - } - - Timestamp ts = rs.getTimestamp("ts_stopped"); - if (ts != null) { - jobBuilder.setStopTime((int) (ts.getTime() / 1000)); - } - else { - jobBuilder.setStopTime(0); - } - return jobBuilder.build(); + Timestamp ts = rs.getTimestamp("ts_stopped"); + if (ts != null) { + jobBuilder.setStopTime((int) (ts.getTime() / 1000)); + } else { + jobBuilder.setStopTime(0); } + return jobBuilder.build(); + } + + private static final String GET_HOSTS = "SELECT " + "alloc.str_name AS alloc_name, " + + "host.pk_host, " + "host.str_name AS host_name, " + "host_stat.str_state AS host_state, " + + "host.b_nimby, " + "host_stat.ts_booted, " + "host_stat.ts_ping, " + "host.int_cores, " + + "host.int_cores_idle, " + "host.int_gpus, " + "host.int_gpus_idle, " + "host.int_gpu_mem, " + + "host.int_gpu_mem_idle, " + "host.int_mem, " + "host.int_mem_idle, " + + "host.str_lock_state, " + "host.str_tags, " + "host.b_comment, " + "host.int_thread_mode, " + + "host_stat.str_os, " + "host_stat.int_mem_total, " + "host_stat.int_mem_free, " + + "host_stat.int_swap_total, 
" + "host_stat.int_swap_free, " + "host_stat.int_mcp_total, " + + "host_stat.int_mcp_free, " + "host_stat.int_gpu_mem_total, " + + "host_stat.int_gpu_mem_free, " + "host_stat.int_load, " + "proc.pk_proc, " + + "proc.int_cores_reserved AS proc_cores, " + "proc.int_gpus_reserved AS proc_gpus, " + + "proc.int_mem_reserved AS proc_memory, " + "proc.int_mem_used AS used_memory, " + + "proc.int_mem_max_used AS max_memory, " + "proc.int_gpu_mem_reserved AS proc_gpu_memory, " + + "proc.ts_ping, " + "proc.ts_booked, " + "proc.ts_dispatched, " + "proc.b_unbooked, " + + "redirect.str_name AS str_redirect, " + "job.str_name AS job_name, " + "job.str_log_dir, " + + "show.str_name AS show_name, " + "frame.str_name AS frame_name " + "FROM " + "alloc, " + + "host_stat, " + "host " + "LEFT JOIN " + "proc " + "ON " + "(proc.pk_host = host.pk_host) " + + "LEFT JOIN " + "frame " + "ON " + "(proc.pk_frame = frame.pk_frame) " + "LEFT JOIN " + + "job " + "ON " + "(proc.pk_job = job.pk_job) " + "LEFT JOIN " + "show " + "ON " + + "(proc.pk_show = show.pk_show) " + "LEFT JOIN " + "redirect " + "ON " + + "(proc.pk_proc = redirect.pk_proc) " + "WHERE " + "host.pk_alloc = alloc.pk_alloc " + "AND " + + "host.pk_host = host_stat.pk_host "; + + /** + * Caches a the host whiteboard. This class is not thread safe so you have to synchronize calls to + * the "cache" method on your own. + */ + class CachedHostWhiteboard { - private static final String GET_HOSTS = - "SELECT " + - "alloc.str_name AS alloc_name, " + - "host.pk_host, " + - "host.str_name AS host_name, " + - "host_stat.str_state AS host_state, " + - "host.b_nimby, " + - "host_stat.ts_booted, " + - "host_stat.ts_ping, " + - "host.int_cores, " + - "host.int_cores_idle, " + - "host.int_gpus, " + - "host.int_gpus_idle, " + - "host.int_gpu_mem, " + - "host.int_gpu_mem_idle, " + - "host.int_mem, " + - "host.int_mem_idle, " + - "host.str_lock_state, " + - "host.str_tags, " + - "host.b_comment, " + - "host.int_thread_mode, " + - "host_stat.str_os, " + - "host_stat.int_mem_total, " + - "host_stat.int_mem_free, " + - "host_stat.int_swap_total, " + - "host_stat.int_swap_free, " + - "host_stat.int_mcp_total, " + - "host_stat.int_mcp_free, " + - "host_stat.int_gpu_mem_total, " + - "host_stat.int_gpu_mem_free, " + - "host_stat.int_load, " + - "proc.pk_proc, " + - "proc.int_cores_reserved AS proc_cores, " + - "proc.int_gpus_reserved AS proc_gpus, " + - "proc.int_mem_reserved AS proc_memory, " + - "proc.int_mem_used AS used_memory, " + - "proc.int_mem_max_used AS max_memory, " + - "proc.int_gpu_mem_reserved AS proc_gpu_memory, " + - "proc.ts_ping, " + - "proc.ts_booked, " + - "proc.ts_dispatched, " + - "proc.b_unbooked, " + - "redirect.str_name AS str_redirect, " + - "job.str_name AS job_name, " + - "job.str_log_dir, " + - "show.str_name AS show_name, " + - "frame.str_name AS frame_name " + - "FROM " + - "alloc, " + - "host_stat, " + - "host " + - "LEFT JOIN " + - "proc " + - "ON " + - "(proc.pk_host = host.pk_host) " + - "LEFT JOIN " + - "frame " + - "ON " + - "(proc.pk_frame = frame.pk_frame) " + - "LEFT JOIN " + - "job " + - "ON " + - "(proc.pk_job = job.pk_job) " + - "LEFT JOIN " + - "show " + - "ON " + - "(proc.pk_show = show.pk_show) " + - "LEFT JOIN " + - "redirect " + - "ON " + - "(proc.pk_proc = redirect.pk_proc) " + - "WHERE " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "host.pk_host = host_stat.pk_host "; + /** + * Number of seconds till the cache expires + */ + private static final int CACHE_EXPIRE_TIME_MS = 10000; /** - * Caches a the host whiteboard. 
This class is not - * thread safe so you have to synchronize calls to - * the "cache" method on your own. + * The host whiteboard we're caching */ - class CachedHostWhiteboard { - - /** - * Number of seconds till the cache expires - */ - private static final int CACHE_EXPIRE_TIME_MS = 10000; - - /** - * The host whiteboard we're caching - */ - private NestedHostSeq hostWhiteboard; - - /** - * The time in which the cache expires. - */ - private long expireTime = 0l; - - public void cache(List hostWhiteboard) { - this.hostWhiteboard = NestedHostSeq.newBuilder().addAllNestedHosts(hostWhiteboard).build(); - expireTime = System.currentTimeMillis() + CACHE_EXPIRE_TIME_MS; - } + private NestedHostSeq hostWhiteboard; - public NestedHostSeq get() { - return hostWhiteboard; - } + /** + * The time in which the cache expires. + */ + private long expireTime = 0l; - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; - } + public void cache(List hostWhiteboard) { + this.hostWhiteboard = NestedHostSeq.newBuilder().addAllNestedHosts(hostWhiteboard).build(); + expireTime = System.currentTimeMillis() + CACHE_EXPIRE_TIME_MS; } - /** - * The CachedHostWhiteboard holds onto the result of the last - * host whiteboard query for about 10 seconds, returning the - * same result to all subsequent requests. - */ - private final CachedHostWhiteboard cachedHostWhiteboard = - new CachedHostWhiteboard(); + public NestedHostSeq get() { + return hostWhiteboard; + } - public NestedHostSeq getHostWhiteboard() { + public boolean isExpired() { + return System.currentTimeMillis() > expireTime; + } + } - if (!cachedHostWhiteboard.isExpired()) { - return cachedHostWhiteboard.get(); - } + /** + * The CachedHostWhiteboard holds onto the result of the last host whiteboard query for about 10 + * seconds, returning the same result to all subsequent requests. + */ + private final CachedHostWhiteboard cachedHostWhiteboard = new CachedHostWhiteboard(); - /* - * Ensures only 1 thread is doing the query, other threads will wait - * and then return the result of the thead that actually did - * the query. 
- */ - synchronized (cachedHostWhiteboard) { + public NestedHostSeq getHostWhiteboard() { - if (!cachedHostWhiteboard.isExpired()) { - return cachedHostWhiteboard.get(); - } + if (!cachedHostWhiteboard.isExpired()) { + return cachedHostWhiteboard.get(); + } - final List result = new ArrayList(3000); - final Map hosts = new HashMap(3000); - final Map procs = new HashMap(8000); - - getJdbcTemplate().query( - GET_HOSTS, - new RowMapper() { - - public NestedHost mapRow(ResultSet rs, int row) throws SQLException { - NestedHost host; - String hid = rs.getString("pk_host"); - if (!hosts.containsKey(hid)) { - host = WhiteboardDaoJdbc.mapNestedHostBuilder(rs).build(); - hosts.put(hid, host); - result.add(host); - } - else { - host = hosts.get(hid); - } - - String pid = rs.getString("pk_proc"); - if (pid != null) { - NestedProc proc; - if (!procs.containsKey(pid)) { - proc = NestedProc.newBuilder() - .setId(pid) - .setName(CueUtil.buildProcName(host.getName(), - rs.getInt("proc_cores"), - rs.getInt("proc_gpus"))) - .setReservedCores(Convert.coreUnitsToCores( - rs.getInt("proc_cores"))) - .setReservedGpus(rs.getInt("proc_gpus")) - .setReservedMemory(rs.getLong("proc_memory")) - .setReservedGpuMemory(rs.getLong("proc_gpu_memory")) - .setUsedMemory(rs.getLong("used_memory")) - .setFrameName(rs.getString("frame_name")) - .setJobName(rs.getString("job_name")) - .setShowName(rs.getString("show_name")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) - .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) - .setUnbooked(rs.getBoolean("b_unbooked")) - .setLogPath(String.format("%s/%s.%s.rqlog", - rs.getString("str_log_dir"),rs.getString("job_name"), - rs.getString("frame_name"))) - .setRedirectTarget(rs.getString("str_redirect")) - .setParent(host) - .build(); - - host = host.toBuilder().setProcs( - host.getProcs().toBuilder().addNestedProcs(proc).build()) - .build(); - procs.put(pid, proc); - } - else { - proc = procs.get(pid); - } - } - return null; - } - }); - - cachedHostWhiteboard.cache(result); - } + /* + * Ensures only 1 thread is doing the query, other threads will wait and then return the result + * of the thead that actually did the query. 
+ */ + synchronized (cachedHostWhiteboard) { + + if (!cachedHostWhiteboard.isExpired()) { return cachedHostWhiteboard.get(); + } + + final List result = new ArrayList(3000); + final Map hosts = new HashMap(3000); + final Map procs = new HashMap(8000); + + getJdbcTemplate().query(GET_HOSTS, new RowMapper() { + + public NestedHost mapRow(ResultSet rs, int row) throws SQLException { + NestedHost host; + String hid = rs.getString("pk_host"); + if (!hosts.containsKey(hid)) { + host = WhiteboardDaoJdbc.mapNestedHostBuilder(rs).build(); + hosts.put(hid, host); + result.add(host); + } else { + host = hosts.get(hid); + } + + String pid = rs.getString("pk_proc"); + if (pid != null) { + NestedProc proc; + if (!procs.containsKey(pid)) { + proc = NestedProc.newBuilder().setId(pid) + .setName(CueUtil.buildProcName(host.getName(), rs.getInt("proc_cores"), + rs.getInt("proc_gpus"))) + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("proc_cores"))) + .setReservedGpus(rs.getInt("proc_gpus")) + .setReservedMemory(rs.getLong("proc_memory")) + .setReservedGpuMemory(rs.getLong("proc_gpu_memory")) + .setUsedMemory(rs.getLong("used_memory")).setFrameName(rs.getString("frame_name")) + .setJobName(rs.getString("job_name")).setShowName(rs.getString("show_name")) + .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) + .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) + .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) + .setUnbooked(rs.getBoolean("b_unbooked")) + .setLogPath(String.format("%s/%s.%s.rqlog", rs.getString("str_log_dir"), + rs.getString("job_name"), rs.getString("frame_name"))) + .setRedirectTarget(rs.getString("str_redirect")).setParent(host).build(); + + host = host.toBuilder() + .setProcs(host.getProcs().toBuilder().addNestedProcs(proc).build()).build(); + procs.put(pid, proc); + } else { + proc = procs.get(pid); + } + } + return null; + } + }); + + cachedHostWhiteboard.cache(result); } + return cachedHostWhiteboard.get(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java index 7d2b8d5a7..d6a888362 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
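For readers following the reformatted hunk above: getHostWhiteboard() pairs a roughly 10-second time-bounded cache with a second expiry check inside the synchronized block, so only one thread rebuilds the host whiteboard while concurrent callers reuse the cached result. A minimal, self-contained sketch of that pattern follows; the class and method names are illustrative and are not part of this patch.

import java.util.concurrent.Callable;

// Sketch of the CachedHostWhiteboard pattern: fast unsynchronized expiry check,
// then a re-check under the lock so only one thread pays for the rebuild.
class TimedCache<T> {
  private static final long EXPIRE_MS = 10000;

  // volatile so the unsynchronized fast path sees a consistent snapshot.
  private volatile T value;
  private volatile long expireTime = 0L;

  private boolean isExpired() {
    return System.currentTimeMillis() > expireTime;
  }

  public T get(Callable<T> loader) throws Exception {
    if (!isExpired()) {
      return value; // Fast path: a slightly stale snapshot is acceptable here.
    }
    synchronized (this) {
      if (isExpired()) { // Another thread may have refreshed it while we waited.
        value = loader.call();
        expireTime = System.currentTimeMillis() + EXPIRE_MS;
      }
      return value;
    }
  }
}

The unsynchronized first check is deliberately cheap: the worst case is returning a snapshot that is a few seconds stale, which is exactly what the cache is for.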
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -33,95 +29,67 @@ import com.imageworks.spcue.dao.OwnerDao; import com.imageworks.spcue.util.SqlUtil; -public class OwnerDaoJdbc extends JdbcDaoSupport implements OwnerDao { - - public static final RowMapper - OWNER_MAPPER = new RowMapper() { - public OwnerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - OwnerEntity o = new OwnerEntity(); - o.id = rs.getString("pk_owner"); - o.name = rs.getString("str_username"); - return o; - } - }; - - @Override - public boolean deleteOwner(Entity owner) { - return getJdbcTemplate().update( - "DELETE FROM owner WHERE pk_owner = ?", - owner.getId()) > 0; - } +public class OwnerDaoJdbc extends JdbcDaoSupport implements OwnerDao { - private static final String QUERY_FOR_OWNER = - "SELECT " + - "owner.pk_owner," + - "owner.str_username " + - "FROM " + - "owner "; - - @Override - public OwnerEntity findOwner(String name) { - try { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " WHERE str_username = ?", - OWNER_MAPPER, name); - } catch (EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException( - "Failed to find owner: " + name, 1); - } + public static final RowMapper OWNER_MAPPER = new RowMapper() { + public OwnerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + OwnerEntity o = new OwnerEntity(); + o.id = rs.getString("pk_owner"); + o.name = rs.getString("str_username"); + return o; } - - @Override - public OwnerEntity getOwner(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " WHERE pk_owner = ?", - OWNER_MAPPER, id); - } - - @Override - public OwnerEntity getOwner(HostInterface host) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + - "WHERE " + - "pk_owner = (" + - "SELECT "+ - "pk_owner " + - "FROM " + - "deed " + - "WHERE " + - "pk_host = ?)", - OWNER_MAPPER, host.getHostId()); - } - - public boolean isOwner(OwnerEntity owner, HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host, deed" + - " WHERE host.pk_host = deed.pk_host AND deed.pk_owner=?", - Integer.class, owner.getId()) > 0; - } - - private static final String INSERT_OWNER = - "INSERT INTO " + - "owner " + - "(" + - "pk_owner," + - "pk_show," + - "str_username " + - ") " + - "VALUES (?,?,?)"; - - @Override - public void insertOwner(OwnerEntity owner, ShowInterface show) { - owner.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_OWNER, - owner.id, show.getShowId(), owner.name); - } - - @Override - public void updateShow(Entity owner, ShowInterface show) { - getJdbcTemplate().update( - "UPDATE owner SET pk_show = ? 
WHERE pk_owner = ?", - show.getShowId(), owner.getId()); + }; + + @Override + public boolean deleteOwner(Entity owner) { + return getJdbcTemplate().update("DELETE FROM owner WHERE pk_owner = ?", owner.getId()) > 0; + } + + private static final String QUERY_FOR_OWNER = + "SELECT " + "owner.pk_owner," + "owner.str_username " + "FROM " + "owner "; + + @Override + public OwnerEntity findOwner(String name) { + try { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " WHERE str_username = ?", + OWNER_MAPPER, name); + } catch (EmptyResultDataAccessException e) { + throw new EmptyResultDataAccessException("Failed to find owner: " + name, 1); } + } + + @Override + public OwnerEntity getOwner(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " WHERE pk_owner = ?", OWNER_MAPPER, + id); + } + + @Override + public OwnerEntity getOwner(HostInterface host) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + "WHERE " + "pk_owner = (" + "SELECT " + + "pk_owner " + "FROM " + "deed " + "WHERE " + "pk_host = ?)", OWNER_MAPPER, + host.getHostId()); + } + + public boolean isOwner(OwnerEntity owner, HostInterface host) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host, deed" + + " WHERE host.pk_host = deed.pk_host AND deed.pk_owner=?", + Integer.class, owner.getId()) > 0; + } + + private static final String INSERT_OWNER = "INSERT INTO " + "owner " + "(" + "pk_owner," + + "pk_show," + "str_username " + ") " + "VALUES (?,?,?)"; + + @Override + public void insertOwner(OwnerEntity owner, ShowInterface show) { + owner.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_OWNER, owner.id, show.getShowId(), owner.name); + } + + @Override + public void updateShow(Entity owner, ShowInterface show) { + getJdbcTemplate().update("UPDATE owner SET pk_show = ? WHERE pk_owner = ?", show.getShowId(), + owner.getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java index 499113381..d37b86cbc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
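The OwnerDaoJdbc hunk above is a clean example of the Spring JdbcTemplate idiom this PR keeps intact while reformatting: a shared RowMapper turns each ResultSet row into an entity, and queryForObject()/update() bind positional "?" parameters from the trailing arguments. A reduced, self-contained sketch of that idiom; OwnerRow and the class name are illustrative stand-ins, not code from this patch.

import java.sql.ResultSet;
import java.sql.SQLException;

import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.RowMapper;

// Sketch of the RowMapper + JdbcTemplate idiom used by OwnerDaoJdbc above.
class OwnerQueries {

  // Simple value object standing in for OwnerEntity.
  static class OwnerRow {
    String id;
    String username;
  }

  // One shared mapper converts each row into an entity.
  static final RowMapper<OwnerRow> OWNER_MAPPER = new RowMapper<OwnerRow>() {
    public OwnerRow mapRow(ResultSet rs, int rowNum) throws SQLException {
      OwnerRow o = new OwnerRow();
      o.id = rs.getString("pk_owner");
      o.username = rs.getString("str_username");
      return o;
    }
  };

  private final JdbcTemplate jdbc;

  OwnerQueries(JdbcTemplate jdbc) {
    this.jdbc = jdbc;
  }

  OwnerRow findByUsername(String name) {
    // Positional "?" placeholders are bound from the trailing arguments.
    return jdbc.queryForObject(
        "SELECT pk_owner, str_username FROM owner WHERE str_username = ?", OWNER_MAPPER, name);
  }

  boolean delete(String ownerId) {
    // update() returns the number of rows affected.
    return jdbc.update("DELETE FROM owner WHERE pk_owner = ?", ownerId) > 0;
  }
}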
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -36,187 +32,121 @@ public class PointDaoJdbc extends JdbcDaoSupport implements PointDao { - @Override - public void insertPointConf(PointDetail t) { - t.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update( - "INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", - t.id, t.getShowId(), t.getDepartmentId()); + @Override + public void insertPointConf(PointDetail t) { + t.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update("INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", t.id, + t.getShowId(), t.getDepartmentId()); + } + + @Override + public boolean isManaged(ShowInterface show, DepartmentInterface dept) { + try { + return getJdbcTemplate().queryForObject( + "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", Integer.class, + show.getShowId(), dept.getDepartmentId()) == 1; + } catch (org.springframework.dao.DataRetrievalFailureException e) { + return false; } - - @Override - public boolean isManaged(ShowInterface show, DepartmentInterface dept) { - try { - return getJdbcTemplate().queryForObject( - "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", - Integer.class, show.getShowId(), dept.getDepartmentId()) == 1; - } catch (org.springframework.dao.DataRetrievalFailureException e) { - return false; + } + + @Override + public PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept) { + PointDetail r = new PointDetail(); + r.deptId = dept.getId(); + r.showId = show.getShowId(); + r.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update("INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", r.id, + r.getShowId(), r.getDepartmentId()); + return r; + } + + @Override + public boolean pointConfExists(ShowInterface show, DepartmentInterface dept) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM point WHERE pk_show=? AND pk_dept=?", Integer.class, show.getShowId(), + dept.getDepartmentId()) > 0; + } + + private static final String UPDATE_TI_MANAGED = "UPDATE " + "point " + "SET " + + "b_managed = true," + "str_ti_task=?, " + "int_min_cores=? 
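PointDaoJdbc above (like several DAOs later in this patch) expresses existence and flag tests as single-value queries: queryForObject(..., Integer.class, ...) coerced to a boolean with > 0 or == 1, with a missing row treated as false by catching DataRetrievalFailureException. A small sketch of that shape, with illustrative names that are not part of this patch:

import org.springframework.dao.DataRetrievalFailureException;
import org.springframework.jdbc.core.JdbcTemplate;

// Sketch of the COUNT(1) / single-column boolean checks used by pointConfExists()
// and isManaged() above.
class PointChecks {
  private final JdbcTemplate jdbc;

  PointChecks(JdbcTemplate jdbc) {
    this.jdbc = jdbc;
  }

  boolean pointConfExists(String showId, String deptId) {
    // COUNT(1) always returns a row, so the result can be compared directly.
    return jdbc.queryForObject("SELECT COUNT(1) FROM point WHERE pk_show=? AND pk_dept=?",
        Integer.class, showId, deptId) > 0;
  }

  boolean isManaged(String showId, String deptId) {
    try {
      // b_managed is read back as an integer flag; a missing row means "not managed".
      return jdbc.queryForObject("SELECT b_managed FROM point WHERE pk_show=? AND pk_dept=?",
          Integer.class, showId, deptId) == 1;
    } catch (DataRetrievalFailureException e) {
      return false;
    }
  }
}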
" + "WHERE " + "pk_point=?"; + + @Override + public void updateEnableManaged(PointInterface p, String task, int coreUnits) { + getJdbcTemplate().update(UPDATE_TI_MANAGED, task, coreUnits, p.getPointId()); + } + + private static final String UPDATE_DISABLE_TI_MANAGED = "UPDATE " + "point " + "SET " + + "b_managed = false," + "str_ti_task=null, " + "int_min_cores=0 " + "WHERE " + "pk_point=?"; + + @Override + public void updateDisableManaged(PointInterface p) { + getJdbcTemplate().update(UPDATE_DISABLE_TI_MANAGED, p.getPointId()); + } + + private static final RowMapper DEPARTMENT_CONFIG_DETAIL_MAPPER = + new RowMapper() { + public PointDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + PointDetail rpd = new PointDetail(); + rpd.deptId = rs.getString("pk_dept"); + rpd.showId = rs.getString("pk_show"); + rpd.id = rs.getString("pk_point"); + rpd.cores = rs.getInt("int_min_cores"); + rpd.tiTask = rs.getString("str_ti_task"); + return rpd; } - } - - @Override - public PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept) { - PointDetail r = new PointDetail(); - r.deptId = dept.getId(); - r.showId = show.getShowId(); - r.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update( - "INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", - r.id, r.getShowId(), r.getDepartmentId()); - return r; - } - - @Override - public boolean pointConfExists(ShowInterface show, DepartmentInterface dept) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM point WHERE pk_show=? AND pk_dept=?", - Integer.class, show.getShowId(), dept.getDepartmentId()) > 0; - } - - private static final String UPDATE_TI_MANAGED = - "UPDATE " + - "point " + - "SET " + - "b_managed = true,"+ - "str_ti_task=?, "+ - "int_min_cores=? " + - "WHERE " + - "pk_point=?"; - - @Override - public void updateEnableManaged(PointInterface p, String task, int coreUnits) { - getJdbcTemplate().update(UPDATE_TI_MANAGED, - task, coreUnits, p.getPointId()); - } - - private static final String UPDATE_DISABLE_TI_MANAGED = - "UPDATE " + - "point " + - "SET " + - "b_managed = false,"+ - "str_ti_task=null, "+ - "int_min_cores=0 " + - "WHERE " + - "pk_point=?"; - - @Override - public void updateDisableManaged(PointInterface p) { - getJdbcTemplate().update(UPDATE_DISABLE_TI_MANAGED, p.getPointId()); - } - - private static final RowMapper DEPARTMENT_CONFIG_DETAIL_MAPPER = - new RowMapper() { - public PointDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - PointDetail rpd = new PointDetail(); - rpd.deptId = rs.getString("pk_dept"); - rpd.showId = rs.getString("pk_show"); - rpd.id = rs.getString("pk_point"); - rpd.cores = rs.getInt("int_min_cores"); - rpd.tiTask = rs.getString("str_ti_task"); - return rpd; - } - }; - - private static final String GET_DEPARTMENT_CONFIG_DETAIL = - "SELECT "+ - "pk_point,"+ - "pk_dept,"+ - "pk_show,"+ - "str_ti_task,"+ - "int_min_cores "+ - "FROM " + - "point " + - "WHERE " + - "pk_point = ?"; - - @Override - public PointDetail getPointConfDetail(String id) { - return getJdbcTemplate().queryForObject(GET_DEPARTMENT_CONFIG_DETAIL, - DEPARTMENT_CONFIG_DETAIL_MAPPER, id); - } - - private static final String GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT = - "SELECT "+ - "pk_point,"+ - "pk_dept,"+ - "pk_show,"+ - "str_ti_task,"+ - "int_min_cores, "+ - "b_managed " + - "FROM " + - "point " + - "WHERE " + - "pk_show = ? " + - "AND " + - "pk_dept = ? 
"; - - @Override - public PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept) { - return getJdbcTemplate().queryForObject(GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT, - DEPARTMENT_CONFIG_DETAIL_MAPPER, show.getShowId(), dept.getDepartmentId()); - } - - private static final String UPDATE_TI_MANAGED_CORES = - "UPDATE " + - "point " + - "SET " + - "int_min_cores=? " + - "WHERE " + - "pk_point=?"; - - @Override - public void updateManagedCores(PointInterface cdept, int cores) { - getJdbcTemplate().update(UPDATE_TI_MANAGED_CORES, cores, - cdept.getPointId()); - - } - - private static final String GET_MANAGED_POINT_CONFS = - "SELECT " + - "pk_point,"+ - "pk_dept,"+ - "pk_show,"+ - "str_ti_task,"+ - "int_min_cores, "+ - "b_managed " + - "FROM " + - "point " + - "WHERE " + - "b_managed = true "; - - @Override - public List getManagedPointConfs() { - return getJdbcTemplate().query(GET_MANAGED_POINT_CONFS, - DEPARTMENT_CONFIG_DETAIL_MAPPER); - } - - @Override - public void updatePointConfUpdateTime(PointInterface t) { - getJdbcTemplate().update( - "UPDATE point SET ts_updated=current_timestamp WHERE pk_point=?", - t.getPointId()); - } - - private static final String IS_OVER_MIN_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job,"+ - "point p "+ - "WHERE " + - "job.pk_show = p.pk_show " + - "AND " + - "job.pk_dept = p.pk_dept " + - "AND " + - "p.int_cores > p.int_min_cores " + - "AND "+ - "job.pk_job = ?"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, - Integer.class, job.getJobId()) > 0; - } + }; + + private static final String GET_DEPARTMENT_CONFIG_DETAIL = + "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores " + + "FROM " + "point " + "WHERE " + "pk_point = ?"; + + @Override + public PointDetail getPointConfDetail(String id) { + return getJdbcTemplate().queryForObject(GET_DEPARTMENT_CONFIG_DETAIL, + DEPARTMENT_CONFIG_DETAIL_MAPPER, id); + } + + private static final String GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT = + "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores, " + + "b_managed " + "FROM " + "point " + "WHERE " + "pk_show = ? " + "AND " + "pk_dept = ? "; + + @Override + public PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept) { + return getJdbcTemplate().queryForObject(GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT, + DEPARTMENT_CONFIG_DETAIL_MAPPER, show.getShowId(), dept.getDepartmentId()); + } + + private static final String UPDATE_TI_MANAGED_CORES = + "UPDATE " + "point " + "SET " + "int_min_cores=? 
" + "WHERE " + "pk_point=?"; + + @Override + public void updateManagedCores(PointInterface cdept, int cores) { + getJdbcTemplate().update(UPDATE_TI_MANAGED_CORES, cores, cdept.getPointId()); + + } + + private static final String GET_MANAGED_POINT_CONFS = + "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores, " + + "b_managed " + "FROM " + "point " + "WHERE " + "b_managed = true "; + + @Override + public List getManagedPointConfs() { + return getJdbcTemplate().query(GET_MANAGED_POINT_CONFS, DEPARTMENT_CONFIG_DETAIL_MAPPER); + } + + @Override + public void updatePointConfUpdateTime(PointInterface t) { + getJdbcTemplate().update("UPDATE point SET ts_updated=current_timestamp WHERE pk_point=?", + t.getPointId()); + } + + private static final String IS_OVER_MIN_CORES = "SELECT " + "COUNT(1) " + "FROM " + "job," + + "point p " + "WHERE " + "job.pk_show = p.pk_show " + "AND " + "job.pk_dept = p.pk_dept " + + "AND " + "p.int_cores > p.int_min_cores " + "AND " + "job.pk_job = ?"; + + @Override + public boolean isOverMinCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, Integer.class, job.getJobId()) > 0; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java index 0e24ed1c8..364791313 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; - import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -54,887 +49,591 @@ public class ProcDaoJdbc extends JdbcDaoSupport implements ProcDao { - @Autowired - private Environment env; - - private static final String VERIFY_RUNNING_PROC = - "SELECT " + - "proc.pk_frame " + - "FROM " + - "proc, " + - "job " + - "WHERE " + - "proc.pk_job = job.pk_job " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "proc.pk_proc= ? 
"; - - public boolean verifyRunningProc(String procId, String frameId) { - try { - String pk_frame = getJdbcTemplate().queryForObject( - VERIFY_RUNNING_PROC, String.class, procId); - if (pk_frame != null) { - return pk_frame.equals(frameId); - } - else { - return false; - } - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - // EAT - } - return false; - } + @Autowired + private Environment env; - private static final String DELETE_VIRTUAL_PROC = - "DELETE FROM " + - "proc " + - "WHERE " + - "pk_proc=?"; + private static final String VERIFY_RUNNING_PROC = "SELECT " + "proc.pk_frame " + "FROM " + + "proc, " + "job " + "WHERE " + "proc.pk_job = job.pk_job " + "AND " + + "job.str_state = 'PENDING' " + "AND " + "proc.pk_proc= ? "; - public boolean deleteVirtualProc(VirtualProc proc) { - if(getJdbcTemplate().update(DELETE_VIRTUAL_PROC, proc.getProcId()) == 0) { - logger.info("failed to delete " + proc + " , proc does not exist."); - return false; - } - // update all of the resource counts. - procDestroyed(proc); - return true; + public boolean verifyRunningProc(String procId, String frameId) { + try { + String pk_frame = getJdbcTemplate().queryForObject(VERIFY_RUNNING_PROC, String.class, procId); + if (pk_frame != null) { + return pk_frame.equals(frameId); + } else { + return false; + } + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + // EAT } + return false; + } - private static final String INSERT_VIRTUAL_PROC = - "INSERT INTO " + - "proc " + - "( " + - "pk_proc, " + - "pk_host, " + - "pk_show, "+ - "pk_layer,"+ - "pk_job," + - "pk_frame, "+ - "int_cores_reserved, " + - "int_mem_reserved, " + - "int_mem_pre_reserved, " + - "int_mem_used, "+ - "int_gpus_reserved, " + - "int_gpu_mem_reserved, " + - "int_gpu_mem_pre_reserved, " + - "int_gpu_mem_used, " + - "b_local " + - ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) "; - - public void insertVirtualProc(VirtualProc proc) { - proc.id = SqlUtil.genKeyRandom(); - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - long memGpuReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_min", - Long.class); - int result = 0; - try { - result = getJdbcTemplate().update(INSERT_VIRTUAL_PROC, - proc.getProcId(), proc.getHostId(), proc.getShowId(), - proc.getLayerId(), proc.getJobId(), proc.getFrameId(), - proc.coresReserved, proc.memoryReserved, - proc.memoryReserved, memReservedMin, - proc.gpusReserved, proc.gpuMemoryReserved, - proc.gpuMemoryReserved, memGpuReservedMin, - proc.isLocalDispatch); - - // Update all of the resource counts - procCreated(proc); - } - catch (org.springframework.dao.DataIntegrityViolationException d) { - /* - * This means the frame is already running. If that is the - * case, don't delete it, just set pk_frame to null or - * the orphaned proc handler will catch it. 
- */ - throw new ResourceDuplicationFailureException("The frame " + - proc.getFrameId() + " is already assigned to a proc."); - } - catch (Exception e) { - String msg = "unable to book proc " + - proc.getName() + " on frame " + proc.getFrameId() + - " , " + e; - throw new ResourceReservationFailureException(msg,e); - } + private static final String DELETE_VIRTUAL_PROC = + "DELETE FROM " + "proc " + "WHERE " + "pk_proc=?"; - if (result == 0) { - String msg = "unable to book proc " + proc.id + - " the insert query succeeded but returned 0"; - throw new ResourceReservationFailureException(msg); - } + public boolean deleteVirtualProc(VirtualProc proc) { + if (getJdbcTemplate().update(DELETE_VIRTUAL_PROC, proc.getProcId()) == 0) { + logger.info("failed to delete " + proc + " , proc does not exist."); + return false; } - - private static final String UPDATE_VIRTUAL_PROC_ASSIGN = - "UPDATE " + - "proc " + - "SET " + - "pk_show = ?, " + - "pk_job = ?, " + - "pk_layer = ?, " + - "pk_frame = ?, " + - "int_mem_used = 0, " + - "int_mem_max_used = 0, " + - "int_virt_used = 0, " + - "int_virt_max_used = 0, " + - "ts_dispatched = current_timestamp " + - "WHERE " + - "pk_proc = ?"; - - public void updateVirtualProcAssignment(VirtualProc proc) { - - int result = 0; - try { - result = getJdbcTemplate().update( - UPDATE_VIRTUAL_PROC_ASSIGN, - proc.getShowId(), proc.getJobId(), proc.getLayerId(), - proc.getFrameId(), proc.getProcId()); - } - catch (org.springframework.dao.DataIntegrityViolationException d) { - throw new ResourceDuplicationFailureException("The frame " + - proc.getFrameId() + " is already assigned to " + - "the proc " + proc); - } - catch (Exception e) { - String msg = "unable to book proc " + - proc.id + ", " + e; - throw new ResourceReservationFailureException(msg, e); - } - - /* - * If the proc was not updated then it has disappeared. - */ - if (result == 0) { - String msg = "unable to book proc " + - proc.id + ", the proc no longer exists,"; - throw new ResourceReservationFailureException(msg); - } + // update all of the resource counts. + procDestroyed(proc); + return true; + } + + private static final String INSERT_VIRTUAL_PROC = "INSERT INTO " + "proc " + "( " + "pk_proc, " + + "pk_host, " + "pk_show, " + "pk_layer," + "pk_job," + "pk_frame, " + "int_cores_reserved, " + + "int_mem_reserved, " + "int_mem_pre_reserved, " + "int_mem_used, " + "int_gpus_reserved, " + + "int_gpu_mem_reserved, " + "int_gpu_mem_pre_reserved, " + "int_gpu_mem_used, " + "b_local " + + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) "; + + public void insertVirtualProc(VirtualProc proc) { + proc.id = SqlUtil.genKeyRandom(); + long memReservedMin = env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + int result = 0; + try { + result = getJdbcTemplate().update(INSERT_VIRTUAL_PROC, proc.getProcId(), proc.getHostId(), + proc.getShowId(), proc.getLayerId(), proc.getJobId(), proc.getFrameId(), + proc.coresReserved, proc.memoryReserved, proc.memoryReserved, memReservedMin, + proc.gpusReserved, proc.gpuMemoryReserved, proc.gpuMemoryReserved, memGpuReservedMin, + proc.isLocalDispatch); + + // Update all of the resource counts + procCreated(proc); + } catch (org.springframework.dao.DataIntegrityViolationException d) { + /* + * This means the frame is already running. If that is the case, don't delete it, just set + * pk_frame to null or the orphaned proc handler will catch it. 
+ */ + throw new ResourceDuplicationFailureException( + "The frame " + proc.getFrameId() + " is already assigned to a proc."); + } catch (Exception e) { + String msg = + "unable to book proc " + proc.getName() + " on frame " + proc.getFrameId() + " , " + e; + throw new ResourceReservationFailureException(msg, e); } - private static final String CLEAR_VIRTUAL_PROC_ASSIGN = - "UPDATE " + - "proc " + - "SET " + - "pk_frame = NULL " + - "WHERE " + - "pk_proc = ?"; - - public boolean clearVirtualProcAssignment(ProcInterface proc) { - return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN, - proc.getId()) == 1; + if (result == 0) { + String msg = "unable to book proc " + proc.id + " the insert query succeeded but returned 0"; + throw new ResourceReservationFailureException(msg); } - - private static final String CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME = - "UPDATE " + - "proc " + - "SET " + - "pk_frame = NULL " + - "WHERE " + - "pk_frame = ?"; - - public boolean clearVirtualProcAssignment(FrameInterface frame) { - return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME, - frame.getFrameId()) == 1; + } + + private static final String UPDATE_VIRTUAL_PROC_ASSIGN = "UPDATE " + "proc " + "SET " + + "pk_show = ?, " + "pk_job = ?, " + "pk_layer = ?, " + "pk_frame = ?, " + + "int_mem_used = 0, " + "int_mem_max_used = 0, " + "int_virt_used = 0, " + + "int_virt_max_used = 0, " + "ts_dispatched = current_timestamp " + "WHERE " + "pk_proc = ?"; + + public void updateVirtualProcAssignment(VirtualProc proc) { + + int result = 0; + try { + result = getJdbcTemplate().update(UPDATE_VIRTUAL_PROC_ASSIGN, proc.getShowId(), + proc.getJobId(), proc.getLayerId(), proc.getFrameId(), proc.getProcId()); + } catch (org.springframework.dao.DataIntegrityViolationException d) { + throw new ResourceDuplicationFailureException( + "The frame " + proc.getFrameId() + " is already assigned to " + "the proc " + proc); + } catch (Exception e) { + String msg = "unable to book proc " + proc.id + ", " + e; + throw new ResourceReservationFailureException(msg, e); } - private static final String UPDATE_PROC_MEMORY_USAGE = - "UPDATE " + - "proc " + - "SET " + - "int_mem_used = ?, " + - "int_mem_max_used = ?," + - "int_virt_used = ?, " + - "int_virt_max_used = ?, " + - "int_gpu_mem_used = ?, " + - "int_gpu_mem_max_used = ?, " + - "int_swap_used = ?, " + - "bytea_children = ?, " + - "ts_ping = current_timestamp " + - "WHERE " + - "pk_frame = ?"; - - @Override - public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, - long vss, long maxVss, long usedGpuMemory, long maxUsedGpuMemory, - long usedSwapMemory, byte[] children) { - /* - * This method is going to repeat for a proc every 1 minute, so - * if the proc is being touched by another thread, then return - * quietly without updating memory usage. - * - * If another thread is accessing the proc record, that means - * the proc is probably being booked to another frame, which - * makes this update invalid anyway. - */ - try { - if (getJdbcTemplate().queryForObject( - "SELECT pk_frame FROM proc WHERE pk_frame=? 
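insertVirtualProc() and updateVirtualProcAssignment() above lean on a database integrity constraint to detect a frame that is already booked: Spring raises DataIntegrityViolationException, and the DAO translates it into the domain-level ResourceDuplicationFailureException instead of deleting or retrying (the orphaned-proc handler cleans up later, per the comment). A minimal sketch of that translation; the trimmed column list and the AlreadyBookedException stand-in are illustrative assumptions, not code from this patch.

import org.springframework.dao.DataIntegrityViolationException;
import org.springframework.jdbc.core.JdbcTemplate;

// Sketch of translating a constraint violation into a domain exception,
// as insertVirtualProc() does above.
class BookingWriter {

  // Hypothetical stand-in for ResourceDuplicationFailureException.
  static class AlreadyBookedException extends RuntimeException {
    AlreadyBookedException(String message) {
      super(message);
    }
  }

  private final JdbcTemplate jdbc;

  BookingWriter(JdbcTemplate jdbc) {
    this.jdbc = jdbc;
  }

  void book(String procId, String frameId) {
    try {
      // A uniqueness constraint on the frame column is assumed to reject double bookings.
      jdbc.update("INSERT INTO proc (pk_proc, pk_frame) VALUES (?, ?)", procId, frameId);
    } catch (DataIntegrityViolationException e) {
      // The frame is already running somewhere else; report it in domain terms.
      throw new AlreadyBookedException("The frame " + frameId + " is already assigned to a proc.");
    }
  }
}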
FOR UPDATE", - String.class, f.getFrameId()).equals(f.getFrameId())) { - - getJdbcTemplate().update(new PreparedStatementCreator() { - @Override - public PreparedStatement createPreparedStatement(Connection conn) - throws SQLException { - PreparedStatement updateProc = conn.prepareStatement( - UPDATE_PROC_MEMORY_USAGE); - updateProc.setLong(1, rss); - updateProc.setLong(2, maxRss); - updateProc.setLong(3, vss); - updateProc.setLong(4, maxVss); - updateProc.setLong(5, usedGpuMemory); - updateProc.setLong(6, maxUsedGpuMemory); - updateProc.setLong(7, usedSwapMemory); - updateProc.setBytes(8, children); - updateProc.setString(9, f.getFrameId()); - return updateProc; - } - }); - } - } catch (DataAccessException dae) { - logger.info("The proc for frame " + f + - " could not be updated with new memory stats: " + dae); - } + /* + * If the proc was not updated then it has disappeared. + */ + if (result == 0) { + String msg = "unable to book proc " + proc.id + ", the proc no longer exists,"; + throw new ResourceReservationFailureException(msg); } - - /** - * Maps a row to a VirtualProc object. + } + + private static final String CLEAR_VIRTUAL_PROC_ASSIGN = + "UPDATE " + "proc " + "SET " + "pk_frame = NULL " + "WHERE " + "pk_proc = ?"; + + public boolean clearVirtualProcAssignment(ProcInterface proc) { + return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN, proc.getId()) == 1; + } + + private static final String CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME = + "UPDATE " + "proc " + "SET " + "pk_frame = NULL " + "WHERE " + "pk_frame = ?"; + + public boolean clearVirtualProcAssignment(FrameInterface frame) { + return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME, frame.getFrameId()) == 1; + } + + private static final String UPDATE_PROC_MEMORY_USAGE = + "UPDATE " + "proc " + "SET " + "int_mem_used = ?, " + "int_mem_max_used = ?," + + "int_virt_used = ?, " + "int_virt_max_used = ?, " + "int_gpu_mem_used = ?, " + + "int_gpu_mem_max_used = ?, " + "int_swap_used = ?, " + "bytea_children = ?, " + + "ts_ping = current_timestamp " + "WHERE " + "pk_frame = ?"; + + @Override + public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, long vss, long maxVss, + long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children) { + /* + * This method is going to repeat for a proc every 1 minute, so if the proc is being touched by + * another thread, then return quietly without updating memory usage. + * + * If another thread is accessing the proc record, that means the proc is probably being booked + * to another frame, which makes this update invalid anyway. 
*/ - public static final RowMapper VIRTUAL_PROC_MAPPER = - new RowMapper() { - public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { - VirtualProc proc = new VirtualProc(); - proc.id = rs.getString("pk_proc"); - proc.hostId = rs.getString("pk_host"); - proc.showId = rs.getString("pk_show"); - proc.jobId= rs.getString("pk_job"); - proc.layerId = rs.getString("pk_layer"); - proc.frameId = rs.getString("pk_frame"); - proc.hostName = rs.getString("host_name"); - proc.allocationId = rs.getString("pk_alloc"); - proc.facilityId = rs.getString("pk_facility"); - proc.coresReserved =rs.getInt("int_cores_reserved"); - proc.memoryReserved = rs.getLong("int_mem_reserved"); - proc.memoryMax = rs.getLong("int_mem_max_used"); - proc.gpusReserved = rs.getInt("int_gpus_reserved"); - proc.gpuMemoryReserved = rs.getLong("int_gpu_mem_reserved"); - proc.gpuMemoryMax = rs.getLong("int_gpu_mem_max_used"); - proc.virtualMemoryMax = rs.getLong("int_virt_max_used"); - proc.virtualMemoryUsed = rs.getLong("int_virt_used"); - proc.memoryUsed = rs.getLong("int_mem_used"); - proc.unbooked = rs.getBoolean("b_unbooked"); - proc.isLocalDispatch = rs.getBoolean("b_local"); - proc.os = rs.getString("str_os"); - proc.childProcesses = rs.getBytes("bytea_children"); - return proc; - } - }; - - private static final String GET_VIRTUAL_PROC = - "SELECT " + - "proc.pk_proc," + - "proc.pk_host,"+ - "proc.pk_show,"+ - "proc.pk_job,"+ - "proc.pk_layer,"+ - "proc.pk_frame,"+ - "proc.b_unbooked,"+ - "proc.b_local,"+ - "host.pk_alloc, " + - "alloc.pk_facility,"+ - "proc.int_cores_reserved,"+ - "proc.int_mem_reserved,"+ - "proc.int_mem_max_used,"+ - "proc.int_mem_used,"+ - "proc.int_gpus_reserved,"+ - "proc.int_gpu_mem_reserved,"+ - "proc.int_gpu_mem_max_used,"+ - "proc.int_gpu_mem_used,"+ - "proc.bytea_children,"+ - "proc.int_virt_max_used,"+ - "proc.int_virt_used,"+ - "host.str_name AS host_name, " + - "COALESCE(job.str_os, '') AS str_os " + - "FROM " + - "proc, " + - "job, " + - "host, " + - "host_stat, " + - "alloc " + - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "job.pk_job = proc.pk_job "; - - public VirtualProc getVirtualProc(String id) { - return getJdbcTemplate().queryForObject( - GET_VIRTUAL_PROC + " AND proc.pk_proc=? ", - VIRTUAL_PROC_MAPPER, id); - } - - public VirtualProc findVirtualProc(FrameInterface frame) { - return getJdbcTemplate().queryForObject( - GET_VIRTUAL_PROC + " AND proc.pk_frame=? 
", - VIRTUAL_PROC_MAPPER, frame.getFrameId()); - } - - private static final String GET_VIRTUAL_PROC_LIST = - "SELECT " + - "proc.*, " + - "host.str_name AS host_name, " + - "host.pk_alloc, " + - "COALESCE(job.str_os, '') AS str_os, " + - "alloc.pk_facility " + - "FROM " + - "proc, " + - "frame, " + - "host," + - "host_stat, " + - "alloc, " + - "layer," + - "job, " + - "folder, " + - "show " + - "WHERE " + - "proc.pk_show = show.pk_show " + - "AND " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "proc.pk_job = job.pk_job " + - "AND " + - "proc.pk_layer = layer.pk_layer " + - "AND " + - "proc.pk_frame = frame.pk_frame " + - "AND " + - "job.pk_folder = folder.pk_folder "; - - public List findVirtualProcs(ProcSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), - VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - @Override - public List findBookedVirtualProcs(ProcSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST + - "AND proc.b_unbooked = false"), VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - public List findVirtualProcs(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), - VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - public List findVirtualProcs(HostInterface host) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_host=?", - VIRTUAL_PROC_MAPPER, host.getHostId()); - } - - public List findVirtualProcs(LayerInterface layer) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_layer=?", - VIRTUAL_PROC_MAPPER, layer.getLayerId()); - } - - public List findVirtualProcs(JobInterface job) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_job=?", - VIRTUAL_PROC_MAPPER, job.getJobId()); - } - - private static final String FIND_VIRTUAL_PROCS_LJA = - GET_VIRTUAL_PROC_LIST + - "AND proc.pk_job=( " + - "SELECT pk_job FROM host_local WHERE pk_host_local = ?) " + - "AND proc.pk_host=(" + - "SELECT pk_host FROM host_local WHERE pk_host_local = ?) "; - - @Override - public List findVirtualProcs(LocalHostAssignment l) { - return getJdbcTemplate().query( - FIND_VIRTUAL_PROCS_LJA, - VIRTUAL_PROC_MAPPER, - l.getId(), - l.getId()); - } - - public List findVirtualProcs(HardwareState state) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND host_stat.str_state=?", - VIRTUAL_PROC_MAPPER, state.toString()); - } - - public void unbookVirtualProcs(List procs) { - List batchArgs = new ArrayList(procs.size()); - for (VirtualProc proc: procs) { - batchArgs.add(new Object[] { proc.id }); + try { + if (getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_frame=? 
FOR UPDATE", + String.class, f.getFrameId()).equals(f.getFrameId())) { + + getJdbcTemplate().update(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) throws SQLException { + PreparedStatement updateProc = conn.prepareStatement(UPDATE_PROC_MEMORY_USAGE); + updateProc.setLong(1, rss); + updateProc.setLong(2, maxRss); + updateProc.setLong(3, vss); + updateProc.setLong(4, maxVss); + updateProc.setLong(5, usedGpuMemory); + updateProc.setLong(6, maxUsedGpuMemory); + updateProc.setLong(7, usedSwapMemory); + updateProc.setBytes(8, children); + updateProc.setString(9, f.getFrameId()); + return updateProc; } - - getJdbcTemplate().batchUpdate( - "UPDATE proc SET b_unbooked=true WHERE pk_proc=?", batchArgs); - } - - @Override - public boolean setUnbookState(ProcInterface proc, boolean unbooked) { - return getJdbcTemplate().update( - "UPDATE proc SET b_unbooked=? WHERE pk_proc=?", - unbooked, proc.getProcId()) == 1; - } - - @Override - public boolean setRedirectTarget(ProcInterface p, Redirect r) { - String name = null; - boolean unbooked = false; - if (r != null) { - name = r.getDestinationName(); - unbooked = true; - } - return getJdbcTemplate().update( - "UPDATE proc SET str_redirect=?, b_unbooked=? WHERE pk_proc=?", - name, unbooked, p.getProcId()) == 1; + }); } + } catch (DataAccessException dae) { + logger + .info("The proc for frame " + f + " could not be updated with new memory stats: " + dae); + } + } + + /** + * Maps a row to a VirtualProc object. + */ + public static final RowMapper VIRTUAL_PROC_MAPPER = new RowMapper() { + public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { + VirtualProc proc = new VirtualProc(); + proc.id = rs.getString("pk_proc"); + proc.hostId = rs.getString("pk_host"); + proc.showId = rs.getString("pk_show"); + proc.jobId = rs.getString("pk_job"); + proc.layerId = rs.getString("pk_layer"); + proc.frameId = rs.getString("pk_frame"); + proc.hostName = rs.getString("host_name"); + proc.allocationId = rs.getString("pk_alloc"); + proc.facilityId = rs.getString("pk_facility"); + proc.coresReserved = rs.getInt("int_cores_reserved"); + proc.memoryReserved = rs.getLong("int_mem_reserved"); + proc.memoryMax = rs.getLong("int_mem_max_used"); + proc.gpusReserved = rs.getInt("int_gpus_reserved"); + proc.gpuMemoryReserved = rs.getLong("int_gpu_mem_reserved"); + proc.gpuMemoryMax = rs.getLong("int_gpu_mem_max_used"); + proc.virtualMemoryMax = rs.getLong("int_virt_max_used"); + proc.virtualMemoryUsed = rs.getLong("int_virt_used"); + proc.memoryUsed = rs.getLong("int_mem_used"); + proc.unbooked = rs.getBoolean("b_unbooked"); + proc.isLocalDispatch = rs.getBoolean("b_local"); + proc.os = rs.getString("str_os"); + proc.childProcesses = rs.getBytes("bytea_children"); + return proc; + } + }; + + private static final String GET_VIRTUAL_PROC = "SELECT " + "proc.pk_proc," + "proc.pk_host," + + "proc.pk_show," + "proc.pk_job," + "proc.pk_layer," + "proc.pk_frame," + "proc.b_unbooked," + + "proc.b_local," + "host.pk_alloc, " + "alloc.pk_facility," + "proc.int_cores_reserved," + + "proc.int_mem_reserved," + "proc.int_mem_max_used," + "proc.int_mem_used," + + "proc.int_gpus_reserved," + "proc.int_gpu_mem_reserved," + "proc.int_gpu_mem_max_used," + + "proc.int_gpu_mem_used," + "proc.bytea_children," + "proc.int_virt_max_used," + + "proc.int_virt_used," + "host.str_name AS host_name, " + + "COALESCE(job.str_os, '') AS str_os " + "FROM " + "proc, " + "job, " + "host, " + + "host_stat, " + "alloc " + "WHERE " + 
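updateProcMemoryUsage() above reads the proc row back with SELECT ... FOR UPDATE and simply skips the write when the row cannot be confirmed for this frame, since a proc that is being rebooked makes the stats update invalid anyway; the write itself goes through a PreparedStatementCreator so the bytea_children column can be bound as bytes. A reduced sketch of that shape (column list trimmed for brevity; names follow the DAO above, not code from this patch):

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;

import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.core.PreparedStatementCreator;

// Sketch: confirm the row under FOR UPDATE, then write through a
// PreparedStatementCreator, as updateProcMemoryUsage() does above.
class ProcStatsWriter {
  private final JdbcTemplate jdbc;

  ProcStatsWriter(JdbcTemplate jdbc) {
    this.jdbc = jdbc;
  }

  void updateMemory(String frameId, long rss, byte[] children) {
    try {
      // Within the surrounding transaction, FOR UPDATE waits for any writer holding the row.
      String locked = jdbc.queryForObject(
          "SELECT pk_frame FROM proc WHERE pk_frame=? FOR UPDATE", String.class, frameId);
      if (!frameId.equals(locked)) {
        return; // The proc has moved on; skip the stats update.
      }
      jdbc.update(new PreparedStatementCreator() {
        @Override
        public PreparedStatement createPreparedStatement(Connection conn) throws SQLException {
          PreparedStatement ps = conn.prepareStatement(
              "UPDATE proc SET int_mem_used = ?, bytea_children = ? WHERE pk_frame = ?");
          ps.setLong(1, rss);
          ps.setBytes(2, children);
          ps.setString(3, frameId);
          return ps;
        }
      });
    } catch (DataAccessException dae) {
      // Another thread is booking this proc; stale stats are acceptable, so do nothing.
    }
  }
}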
"proc.pk_host = host.pk_host " + "AND " + + "host.pk_host = host_stat.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " + "AND " + + "job.pk_job = proc.pk_job "; + + public VirtualProc getVirtualProc(String id) { + return getJdbcTemplate().queryForObject(GET_VIRTUAL_PROC + " AND proc.pk_proc=? ", + VIRTUAL_PROC_MAPPER, id); + } + + public VirtualProc findVirtualProc(FrameInterface frame) { + return getJdbcTemplate().queryForObject(GET_VIRTUAL_PROC + " AND proc.pk_frame=? ", + VIRTUAL_PROC_MAPPER, frame.getFrameId()); + } + + private static final String GET_VIRTUAL_PROC_LIST = "SELECT " + "proc.*, " + + "host.str_name AS host_name, " + "host.pk_alloc, " + "COALESCE(job.str_os, '') AS str_os, " + + "alloc.pk_facility " + "FROM " + "proc, " + "frame, " + "host," + "host_stat, " + "alloc, " + + "layer," + "job, " + "folder, " + "show " + "WHERE " + "proc.pk_show = show.pk_show " + + "AND " + "proc.pk_host = host.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " + + "AND " + "host.pk_host = host_stat.pk_host " + "AND " + "proc.pk_job = job.pk_job " + "AND " + + "proc.pk_layer = layer.pk_layer " + "AND " + "proc.pk_frame = frame.pk_frame " + "AND " + + "job.pk_folder = folder.pk_folder "; + + public List findVirtualProcs(ProcSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), VIRTUAL_PROC_MAPPER, + r.getValuesArray()); + } + + @Override + public List findBookedVirtualProcs(ProcSearchInterface r) { + return getJdbcTemplate().query( + r.getFilteredQuery(GET_VIRTUAL_PROC_LIST + "AND proc.b_unbooked = false"), + VIRTUAL_PROC_MAPPER, r.getValuesArray()); + } + + public List findVirtualProcs(FrameSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), VIRTUAL_PROC_MAPPER, + r.getValuesArray()); + } + + public List findVirtualProcs(HostInterface host) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_host=?", + VIRTUAL_PROC_MAPPER, host.getHostId()); + } + + public List findVirtualProcs(LayerInterface layer) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_layer=?", + VIRTUAL_PROC_MAPPER, layer.getLayerId()); + } + + public List findVirtualProcs(JobInterface job) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_job=?", + VIRTUAL_PROC_MAPPER, job.getJobId()); + } + + private static final String FIND_VIRTUAL_PROCS_LJA = GET_VIRTUAL_PROC_LIST + "AND proc.pk_job=( " + + "SELECT pk_job FROM host_local WHERE pk_host_local = ?) " + "AND proc.pk_host=(" + + "SELECT pk_host FROM host_local WHERE pk_host_local = ?) "; + + @Override + public List findVirtualProcs(LocalHostAssignment l) { + return getJdbcTemplate().query(FIND_VIRTUAL_PROCS_LJA, VIRTUAL_PROC_MAPPER, l.getId(), + l.getId()); + } + + public List findVirtualProcs(HardwareState state) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND host_stat.str_state=?", + VIRTUAL_PROC_MAPPER, state.toString()); + } + + public void unbookVirtualProcs(List procs) { + List batchArgs = new ArrayList(procs.size()); + for (VirtualProc proc : procs) { + batchArgs.add(new Object[] {proc.id}); + } - public void unbookProc(ProcInterface proc) { - getJdbcTemplate().update("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", - proc.getProcId()); - } + getJdbcTemplate().batchUpdate("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", batchArgs); + } + + @Override + public boolean setUnbookState(ProcInterface proc, boolean unbooked) { + return getJdbcTemplate().update("UPDATE proc SET b_unbooked=? 
WHERE pk_proc=?", unbooked, + proc.getProcId()) == 1; + } + + @Override + public boolean setRedirectTarget(ProcInterface p, Redirect r) { + String name = null; + boolean unbooked = false; + if (r != null) { + name = r.getDestinationName(); + unbooked = true; + } + return getJdbcTemplate().update("UPDATE proc SET str_redirect=?, b_unbooked=? WHERE pk_proc=?", + name, unbooked, p.getProcId()) == 1; + } + + public void unbookProc(ProcInterface proc) { + getJdbcTemplate().update("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", proc.getProcId()); + } + + public String getCurrentShowId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_show FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } + + public String getCurrentJobId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_job FROM proc WHERE pk_proc=?", String.class, + p.getProcId()); + } + + public String getCurrentLayerId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_layer FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } + + public String getCurrentFrameId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } + + private static final String ORPHANED_PROC_INTERVAL = "interval '300' second"; + private static final String GET_ORPHANED_PROC_LIST = "SELECT " + "proc.*, " + + "host.str_name AS host_name, " + "COALESCE(job.str_os, '') AS str_os, " + "host.pk_alloc, " + + "alloc.pk_facility " + "FROM " + "proc, " + "host, " + "host_stat," + "alloc, " + "job " + + "WHERE " + "proc.pk_host = host.pk_host " + "AND " + "host.pk_host = host_stat.pk_host " + + "AND " + "host.pk_alloc = alloc.pk_alloc " + "AND " + "job.pk_job = proc.pk_job " + "AND " + + "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; + + public List findOrphanedVirtualProcs() { + return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST, VIRTUAL_PROC_MAPPER); + } + + public List findOrphanedVirtualProcs(int limit) { + return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST + " LIMIT " + limit, VIRTUAL_PROC_MAPPER); + } + + private static final String IS_ORPHAN = + "SELECT " + "COUNT(1) " + "FROM " + "proc " + "WHERE " + "proc.pk_proc = ? " + "AND " + + "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; + + @Override + public boolean isOrphan(ProcInterface proc) { + return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, proc.getProcId()) == 1; + } + + public boolean increaseReservedMemory(ProcInterface p, long value) { + try { + return getJdbcTemplate().update( + "UPDATE proc SET int_mem_reserved=? WHERE pk_proc=? 
AND int_mem_reserved < ?", value, + p.getProcId(), value) == 1; + } catch (Exception e) { + // check by trigger erify_host_resources + throw new ResourceReservationFailureException( + "failed to increase memory reservation for proc " + p.getProcId() + " to " + value + + ", proc does not have that much memory to spare."); + } + } - public String getCurrentShowId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_show FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } + public long getReservedMemory(ProcInterface proc) { + return getJdbcTemplate().queryForObject("SELECT int_mem_reserved FROM proc WHERE pk_proc=?", + Long.class, proc.getProcId()); + } - public String getCurrentJobId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_job FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } + public long getReservedGpuMemory(ProcInterface proc) { + return getJdbcTemplate().queryForObject("SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", + Long.class, proc.getProcId()); + } - public String getCurrentLayerId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_layer FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } + private static final String FIND_UNDERUTILIZED_PROCS = + "SELECT " + "proc.pk_proc," + "proc.int_mem_reserved - layer_mem.int_max_rss AS free_mem " + + "FROM " + "proc," + "host, " + "layer_mem " + "WHERE " + "proc.pk_host = host.pk_host " + + "AND " + "proc.pk_layer = layer_mem.pk_layer " + "AND " + "layer_mem.int_max_rss > 0 " + + "AND " + "host.pk_host = ? " + "AND " + "proc.pk_proc != ? " + "AND " + + "proc.int_mem_reserved - layer_mem.int_max_rss > 0"; - public String getCurrentFrameId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } + public boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem) { - private static final String ORPHANED_PROC_INTERVAL = "interval '300' second"; - private static final String GET_ORPHANED_PROC_LIST = - "SELECT " + - "proc.*, " + - "host.str_name AS host_name, " + - "COALESCE(job.str_os, '') AS str_os, " + - "host.pk_alloc, " + - "alloc.pk_facility " + - "FROM " + - "proc, " + - "host, " + - "host_stat,"+ - "alloc, " + - "job " + - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "job.pk_job = proc.pk_job " + - "AND " + - "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; - - public List findOrphanedVirtualProcs() { - return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST, VIRTUAL_PROC_MAPPER); - } + List> result = getJdbcTemplate().queryForList(FIND_UNDERUTILIZED_PROCS, + targetProc.getHostId(), targetProc.getProcId()); - public List findOrphanedVirtualProcs(int limit) { - return getJdbcTemplate().query( - GET_ORPHANED_PROC_LIST + " LIMIT " + limit, - VIRTUAL_PROC_MAPPER); - } - - private static final String IS_ORPHAN = - "SELECT " + - "COUNT(1) " + - "FROM " + - "proc " + - "WHERE " + - "proc.pk_proc = ? 
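increaseReservedMemory() above folds its precondition into the UPDATE statement itself (AND int_mem_reserved < ?), so the reservation only ever grows and exactly one affected row counts as success; a database-side check (the comment points at a host-resources trigger) can still reject the new value, which the DAO reports as a reservation failure. A sketch of that check-and-set, with an illustrative exception type standing in for ResourceReservationFailureException:

import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.core.JdbcTemplate;

// Sketch of the atomic check-and-set UPDATE used by increaseReservedMemory() above.
class ReservationWriter {

  // Hypothetical stand-in for ResourceReservationFailureException.
  static class ReservationFailedException extends RuntimeException {
    ReservationFailedException(String message, Throwable cause) {
      super(message, cause);
    }
  }

  private final JdbcTemplate jdbc;

  ReservationWriter(JdbcTemplate jdbc) {
    this.jdbc = jdbc;
  }

  boolean increaseReservedMemory(String procId, long value) {
    try {
      // The WHERE clause enforces the precondition; exactly one updated row means success.
      return jdbc.update(
          "UPDATE proc SET int_mem_reserved=? WHERE pk_proc=? AND int_mem_reserved < ?",
          value, procId, value) == 1;
    } catch (DataAccessException e) {
      // A database-side constraint or trigger refused the new reservation.
      throw new ReservationFailedException(
          "failed to increase memory reservation for proc " + procId + " to " + value, e);
    }
  }
}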
" + - "AND " + - "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; - - @Override - public boolean isOrphan(ProcInterface proc) { - return getJdbcTemplate().queryForObject(IS_ORPHAN, - Integer.class, proc.getProcId()) == 1; - } + if (result.size() == 0) { + logger.info("unable to find under utilized procs on host " + targetProc.getName()); + return false; + } + final Map borrowMap = new HashMap(result.size()); + for (Map map : result) { + logger.info("creating borrow map for: " + (String) map.get("pk_proc")); + borrowMap.put((String) map.get("pk_proc"), 0l); + } - public boolean increaseReservedMemory(ProcInterface p, long value) { - try { - return getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=? AND int_mem_reserved < ?", - value, p.getProcId(), value) == 1; - } catch (Exception e) { - // check by trigger erify_host_resources - throw new ResourceReservationFailureException("failed to increase memory reservation for proc " - + p.getProcId() + " to " + value + ", proc does not have that much memory to spare."); + long memBorrowedTotal = 0l; + int pass = 0; + int maxPasses = 3; + + while (true) { + // the amount of memory we're going to borrow per frame/proc + long memPerFrame = ((targetMem - memBorrowedTotal) / result.size()) + 1; + + // loop through all of our other running frames and try to borrow + // a little bit of memory from each one. + for (Map map : result) { + String pk_proc = (String) map.get("pk_proc"); + Long free_mem = (Long) map.get("free_mem"); + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long available = free_mem - borrowMap.get(pk_proc) - memReservedMin; + if (available > memPerFrame) { + borrowMap.put(pk_proc, borrowMap.get(pk_proc) + memPerFrame); + memBorrowedTotal = memBorrowedTotal + memPerFrame; } } + pass++; - public long getReservedMemory(ProcInterface proc) { - return getJdbcTemplate().queryForObject( - "SELECT int_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, proc.getProcId()); + // If we were unable to borrow anything, just break + if (memBorrowedTotal == 0) { + break; } - - public long getReservedGpuMemory(ProcInterface proc) { - return getJdbcTemplate().queryForObject( - "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, proc.getProcId()); + // If we got the memory we needed, break + if (memBorrowedTotal >= targetMem) { + break; + } + // If we've exceeded the number of tries in this loop, break + if (pass >= maxPasses) { + break; } + } - private static final String FIND_UNDERUTILIZED_PROCS = - "SELECT " + - "proc.pk_proc," + - "proc.int_mem_reserved - layer_mem.int_max_rss AS free_mem " + - "FROM " + - "proc," + - "host, " + - "layer_mem " + - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "proc.pk_layer = layer_mem.pk_layer " + - "AND " + - "layer_mem.int_max_rss > 0 " + - "AND " + - "host.pk_host = ? " + - "AND " + - "proc.pk_proc != ? 
" + - "AND " + - "proc.int_mem_reserved - layer_mem.int_max_rss > 0"; - - public boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem) { - - List> result = getJdbcTemplate().queryForList(FIND_UNDERUTILIZED_PROCS, - targetProc.getHostId(), targetProc.getProcId()); - - if (result.size() == 0) { - logger.info("unable to find under utilized procs on host " + targetProc.getName()); - return false; - } + logger.info("attempted to borrow " + targetMem + " for host " + targetProc.getName() + + ", obtained " + memBorrowedTotal); - final Map borrowMap = new HashMap(result.size()); - for (Map map: result) { - logger.info("creating borrow map for: " + (String) map.get("pk_proc")); - borrowMap.put((String) map.get("pk_proc"), 0l); - } + if (memBorrowedTotal < targetMem) { + logger.info( + "mem borrowed " + memBorrowedTotal + " was less than the target memory of " + targetMem); + return false; + } - long memBorrowedTotal = 0l; - int pass = 0; - int maxPasses = 3; - - while(true) { - // the amount of memory we're going to borrow per frame/proc - long memPerFrame = ((targetMem - memBorrowedTotal) / result.size()) + 1; - - // loop through all of our other running frames and try to borrow - // a little bit of memory from each one. - for (Map map: result) { - String pk_proc = (String) map.get("pk_proc"); - Long free_mem = (Long) map.get("free_mem"); - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - long available = free_mem - borrowMap.get(pk_proc) - memReservedMin; - if (available > memPerFrame) { - borrowMap.put(pk_proc, borrowMap.get(pk_proc) + memPerFrame); - memBorrowedTotal = memBorrowedTotal + memPerFrame; - } - } - pass++; - - // If we were unable to borrow anything, just break - if (memBorrowedTotal == 0) { break; } - // If we got the memory we needed, break - if (memBorrowedTotal >= targetMem) { break; } - // If we've exceeded the number of tries in this loop, break - if (pass >= maxPasses) { break; } - } + /* + * This might fail... I'm not really sure if we should fail the whole operation or what. Just + * gonna let it ride for now. + */ + for (Map.Entry set : borrowMap.entrySet()) { + int success = getJdbcTemplate().update( + "UPDATE proc SET int_mem_reserved = int_mem_reserved - ? WHERE pk_proc=?", set.getValue(), + set.getKey()); + logger.info("transfering " + (set.getValue() * success) + " from " + set.getKey()); + } - logger.info("attempted to borrow " + targetMem + " for host " - + targetProc.getName() + ", obtained " + memBorrowedTotal); + return true; + } + + public void updateReservedMemory(ProcInterface p, long value) { + getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=?", value, + p.getProcId()); + } + + /** + * Updates proc counts for the host, subscription, layer, job, folder, and proc point when a proc + * is destroyed. + * + * @param proc + */ + private void procDestroyed(VirtualProc proc) { + + getJdbcTemplate().update( + "UPDATE " + "host " + "SET " + "int_cores_idle = int_cores_idle + ?," + + "int_mem_idle = int_mem_idle + ?, " + "int_gpus_idle = int_gpus_idle + ?," + + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_host = ?", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.getHostId()); + + if (!proc.isLocalDispatch) { + getJdbcTemplate().update( + "UPDATE " + "subscription " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_show = ? 
" + "AND " + "pk_alloc = ?", + proc.coresReserved, proc.gpusReserved, proc.getShowId(), proc.getAllocationId()); + } - if (memBorrowedTotal < targetMem) { - logger.info("mem borrowed " + memBorrowedTotal + - " was less than the target memory of " + targetMem); - return false; - } + getJdbcTemplate().update( + "UPDATE " + "layer_resource " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_layer = ?", + proc.coresReserved, proc.gpusReserved, proc.getLayerId()); + + if (!proc.isLocalDispatch) { + + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "folder_resource " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_folder = " + + "(SELECT pk_folder FROM job WHERE pk_job=?)", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "point " + "SET " + "int_cores = int_cores - ?, " + "int_gpus = int_gpus - ? " + + "WHERE " + "pk_dept = " + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " + + "pk_show = " + "(SELECT pk_show FROM job WHERE pk_job=?) ", + proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); + } - /* - * This might fail... I'm not really sure if we should - * fail the whole operation or what. Just gonna let it ride for now. - */ - for (Map.Entry set: borrowMap.entrySet()) { - int success = getJdbcTemplate().update( - "UPDATE proc SET int_mem_reserved = int_mem_reserved - ? WHERE pk_proc=?", - set.getValue(), set.getKey()); - logger.info("transfering " + (set.getValue() * success) + " from " + set.getKey()); - } + if (proc.isLocalDispatch) { - return true; - } + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_local_cores = int_local_cores - ?, " + + "int_local_gpus = int_local_gpus - ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); - public void updateReservedMemory(ProcInterface p, long value) { - getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=?", - value, p.getProcId()); - } + getJdbcTemplate().update( + "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle + ?, " + + "int_mem_idle = int_mem_idle + ?, " + "int_gpus_idle = int_gpus_idle + ?, " + + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_job = ? " + "AND " + + "pk_host = ? ", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.getJobId(), proc.getHostId()); + } + } + + /** + * Updates proc counts for the host, subscription, layer, job, folder, and proc point when a new + * proc is created. + * + * @param proc + */ + private void procCreated(VirtualProc proc) { + + getJdbcTemplate().update( + "UPDATE " + "host " + "SET " + "int_cores_idle = int_cores_idle - ?," + + "int_mem_idle = int_mem_idle - ?, " + "int_gpus_idle = int_gpus_idle - ?," + + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_host = ?", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.getHostId()); - /** - * Updates proc counts for the host, subscription, - * layer, job, folder, and proc point when a proc - * is destroyed. 
- * - * @param proc - */ - private void procDestroyed(VirtualProc proc) { - - - getJdbcTemplate().update( - "UPDATE " + - "host " + - "SET " + - "int_cores_idle = int_cores_idle + ?," + - "int_mem_idle = int_mem_idle + ?, " + - "int_gpus_idle = int_gpus_idle + ?," + - "int_gpu_mem_idle = int_gpu_mem_idle + ? " + - "WHERE " + - "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, proc.getHostId()); - - if (!proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + - "subscription " + - "SET " + - "int_cores = int_cores - ?," + - "int_gpus = int_gpus - ? " + - "WHERE " + - "pk_show = ? " + - "AND " + - "pk_alloc = ?", - proc.coresReserved, proc.gpusReserved, proc.getShowId(), - proc.getAllocationId()); - } + /** + * Not keeping track of local cores this way. + */ - getJdbcTemplate().update( - "UPDATE " + - "layer_resource " + - "SET " + - "int_cores = int_cores - ?," + - "int_gpus = int_gpus - ? " + - "WHERE " + - "pk_layer = ?", - proc.coresReserved, proc.gpusReserved, proc.getLayerId()); - - if (!proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_cores = int_cores - ?," + - "int_gpus = int_gpus - ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "folder_resource " + - "SET " + - "int_cores = int_cores - ?," + - "int_gpus = int_gpus - ? " + - "WHERE " + - "pk_folder = " + - "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "point " + - "SET " + - "int_cores = int_cores - ?, " + - "int_gpus = int_gpus - ? " + - "WHERE " + - "pk_dept = " + - "(SELECT pk_dept FROM job WHERE pk_job=?) " + - "AND " + - "pk_show = " + - "(SELECT pk_show FROM job WHERE pk_job=?) ", - proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); - } + if (!proc.isLocalDispatch) { + getJdbcTemplate().update( + "UPDATE " + "subscription " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_show = ? " + "AND " + "pk_alloc = ?", + proc.coresReserved, proc.gpusReserved, proc.getShowId(), proc.getAllocationId()); + } - if (proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_local_cores = int_local_cores - ?, " + - "int_local_gpus = int_local_gpus - ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "host_local " + - "SET " + - "int_cores_idle = int_cores_idle + ?, " + - "int_mem_idle = int_mem_idle + ?, " + - "int_gpus_idle = int_gpus_idle + ?, " + - "int_gpu_mem_idle = int_gpu_mem_idle + ? " + - "WHERE " + - "pk_job = ? " + - "AND " + - "pk_host = ? ", - proc.coresReserved, - proc.memoryReserved, - proc.gpusReserved, - proc.gpuMemoryReserved, - proc.getJobId(), - proc.getHostId()); - } - } + getJdbcTemplate().update( + "UPDATE " + "layer_resource " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_layer = ?", + proc.coresReserved, proc.gpusReserved, proc.getLayerId()); + + if (!proc.isLocalDispatch) { + + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? 
" + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "folder_resource " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_folder = " + + "(SELECT pk_folder FROM job WHERE pk_job=?)", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "point " + "SET " + "int_cores = int_cores + ?," + "int_gpus = int_gpus + ? " + + "WHERE " + "pk_dept = " + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " + + "pk_show = " + "(SELECT pk_show FROM job WHERE pk_job=?) ", + proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); + } - /** - * Updates proc counts for the host, subscription, - * layer, job, folder, and proc point when a new - * proc is created. - * - * @param proc - */ - private void procCreated(VirtualProc proc) { - - getJdbcTemplate().update( - "UPDATE " + - "host " + - "SET " + - "int_cores_idle = int_cores_idle - ?," + - "int_mem_idle = int_mem_idle - ?, " + - "int_gpus_idle = int_gpus_idle - ?," + - "int_gpu_mem_idle = int_gpu_mem_idle - ? " + - "WHERE " + - "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, proc.getHostId()); - - - /** - * Not keeping track of local cores this way. - */ - - if (!proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + - "subscription " + - "SET " + - "int_cores = int_cores + ?," + - "int_gpus = int_gpus + ? " + - "WHERE " + - "pk_show = ? " + - "AND " + - "pk_alloc = ?", - proc.coresReserved, proc.gpusReserved, proc.getShowId(), - proc.getAllocationId()); - } + if (proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + - "layer_resource " + - "SET " + - "int_cores = int_cores + ?," + - "int_gpus = int_gpus + ? " + - "WHERE " + - "pk_layer = ?", - proc.coresReserved, proc.gpusReserved, proc.getLayerId()); - - if (!proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_cores = int_cores + ?," + - "int_gpus = int_gpus + ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "folder_resource " + - "SET " + - "int_cores = int_cores + ?," + - "int_gpus = int_gpus + ? " + - "WHERE " + - "pk_folder = " + - "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "point " + - "SET " + - "int_cores = int_cores + ?," + - "int_gpus = int_gpus + ? " + - "WHERE " + - "pk_dept = " + - "(SELECT pk_dept FROM job WHERE pk_job=?) " + - "AND " + - "pk_show = " + - "(SELECT pk_show FROM job WHERE pk_job=?) ", - proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); - } + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_local_cores = int_local_cores + ?," + + "int_local_gpus = int_local_gpus + ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); - if (proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_local_cores = int_local_cores + ?," + - "int_local_gpus = int_local_gpus + ? 
" + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "host_local " + - "SET " + - "int_cores_idle = int_cores_idle - ?, " + - "int_mem_idle = int_mem_idle - ?," + - "int_gpus_idle = int_gpus_idle - ?, " + - "int_gpu_mem_idle = int_gpu_mem_idle - ? " + - "WHERE " + - "pk_job = ? " + - "AND " + - "pk_host = ?", - proc.coresReserved, - proc.memoryReserved, - proc.gpusReserved, - proc.gpuMemoryReserved, - proc.getJobId(), - proc.getHostId()); - } - } + getJdbcTemplate().update( + "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle - ?, " + + "int_mem_idle = int_mem_idle - ?," + "int_gpus_idle = int_gpus_idle - ?, " + + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_job = ? " + "AND " + + "pk_host = ?", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.getJobId(), proc.getHostId()); + } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java index 252872094..fe89fe91e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -30,86 +27,60 @@ import com.imageworks.spcue.grpc.host.RedirectType; public class RedirectDaoJdbc extends JdbcDaoSupport implements RedirectDao { - @Override - public boolean containsKey(String key) { - return getJdbcTemplate().queryForObject( - "SELECT count(1) FROM redirect WHERE pk_proc = ?", - Integer.class, - key) > 0; - } + @Override + public boolean containsKey(String key) { + return getJdbcTemplate().queryForObject("SELECT count(1) FROM redirect WHERE pk_proc = ?", + Integer.class, key) > 0; + } - @Override - public int countRedirectsWithGroup(String groupId) { - return getJdbcTemplate().queryForObject( - "SELECT count(1) FROM redirect WHERE str_group_id = ?", - Integer.class, - groupId); - } + @Override + public int countRedirectsWithGroup(String groupId) { + return getJdbcTemplate().queryForObject("SELECT count(1) FROM redirect WHERE str_group_id = ?", + Integer.class, groupId); + } - @Override - public int deleteExpired() { - long cutoff = System.currentTimeMillis() - Redirect.EXPIRE_TIME; - return getJdbcTemplate().update( - "DELETE FROM redirect WHERE lng_creation_time < ?", - cutoff); - } + @Override + public int deleteExpired() { + long cutoff = System.currentTimeMillis() - Redirect.EXPIRE_TIME; + return getJdbcTemplate().update("DELETE FROM redirect WHERE lng_creation_time < ?", cutoff); + } - @Override - public void put(String key, Redirect r) { - getJdbcTemplate().update( - "INSERT INTO redirect (" + - "pk_proc, " + - "str_group_id, " + - "int_type, " + - "str_destination_id, " + - "str_name, " + - "lng_creation_time" + - ") VALUES (?, ?, ?, ?, ?, ?) " + - "ON CONFLICT (pk_proc) " + - "DO UPDATE SET " + - "str_group_id = EXCLUDED.str_group_id, " + - "int_type = EXCLUDED.int_type, " + - "str_destination_id = EXCLUDED.str_destination_id, " + - "str_name = EXCLUDED.str_name, " + - "lng_creation_time = EXCLUDED.lng_creation_time", - key, - r.getGroupId(), - r.getType().getNumber(), - r.getDestinationId(), - r.getDestinationName(), - r.getCreationTime()); - } + @Override + public void put(String key, Redirect r) { + getJdbcTemplate().update( + "INSERT INTO redirect (" + "pk_proc, " + "str_group_id, " + "int_type, " + + "str_destination_id, " + "str_name, " + "lng_creation_time" + + ") VALUES (?, ?, ?, ?, ?, ?) " + "ON CONFLICT (pk_proc) " + "DO UPDATE SET " + + "str_group_id = EXCLUDED.str_group_id, " + "int_type = EXCLUDED.int_type, " + + "str_destination_id = EXCLUDED.str_destination_id, " + + "str_name = EXCLUDED.str_name, " + "lng_creation_time = EXCLUDED.lng_creation_time", + key, r.getGroupId(), r.getType().getNumber(), r.getDestinationId(), r.getDestinationName(), + r.getCreationTime()); + } - @Override - public Redirect remove(String key) { - Redirect r = null; - try { - r = getJdbcTemplate().queryForObject( - "SELECT str_group_id, int_type, str_destination_id, str_name, lng_creation_time " - + "FROM redirect " - + "WHERE pk_proc = ? " - + "FOR UPDATE", - new RowMapper() { + @Override + public Redirect remove(String key) { + Redirect r = null; + try { + r = getJdbcTemplate() + .queryForObject( + "SELECT str_group_id, int_type, str_destination_id, str_name, lng_creation_time " + + "FROM redirect " + "WHERE pk_proc = ? 
" + "FOR UPDATE", + new RowMapper() { @Override public Redirect mapRow(ResultSet rs, int rowNum) throws SQLException { - return new Redirect( - rs.getString("str_group_id"), - RedirectType.forNumber(rs.getInt("int_type")), - rs.getString("str_destination_id"), - rs.getString("str_name"), - rs.getLong("lng_creation_time")); + return new Redirect(rs.getString("str_group_id"), + RedirectType.forNumber(rs.getInt("int_type")), + rs.getString("str_destination_id"), rs.getString("str_name"), + rs.getLong("lng_creation_time")); } - }, - key); - } - catch (EmptyResultDataAccessException e) { - return null; - } + }, key); + } catch (EmptyResultDataAccessException e) { + return null; + } - getJdbcTemplate().update( - "DELETE FROM redirect WHERE pk_proc = ?", - key); + getJdbcTemplate().update("DELETE FROM redirect WHERE pk_proc = ?", key); - return r; - } + return r; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java index a637b34d6..91362334e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -35,268 +31,171 @@ public class ServiceDaoJdbc extends JdbcDaoSupport implements ServiceDao { - private static final String SPLITTER = " \\| "; + private static final String SPLITTER = " \\| "; - private static final String JOINER = " | "; + private static final String JOINER = " | "; - public static LinkedHashSet splitTags(String tags) { - LinkedHashSet set = Sets.newLinkedHashSet(); - for(String s: tags.split(SPLITTER)) { - set.add(s.replaceAll(" ", "")); - } - return set; + public static LinkedHashSet splitTags(String tags) { + LinkedHashSet set = Sets.newLinkedHashSet(); + for (String s : tags.split(SPLITTER)) { + set.add(s.replaceAll(" ", "")); } - - public static String joinTags(LinkedHashSet tags) { - return StringUtils.join(tags, JOINER); + return set; + } + + public static String joinTags(LinkedHashSet tags) { + return StringUtils.join(tags, JOINER); + } + + public static final RowMapper SERVICE_MAPPER = new RowMapper() { + public ServiceEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ServiceEntity s = new ServiceEntity(); + s.id = rs.getString("pk_service"); + s.name = rs.getString("str_name"); + s.minCores = rs.getInt("int_cores_min"); + s.maxCores = rs.getInt("int_cores_max"); + s.minMemory = rs.getLong("int_mem_min"); + s.minGpus = rs.getInt("int_gpus_min"); + s.maxGpus = rs.getInt("int_gpus_max"); + s.minGpuMemory = rs.getLong("int_gpu_mem_min"); + s.threadable = rs.getBoolean("b_threadable"); + s.tags = splitTags(rs.getString("str_tags")); + s.timeout = rs.getInt("int_timeout"); + s.timeout_llu = rs.getInt("int_timeout_llu"); + s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); + return s; } - - public static final RowMapper SERVICE_MAPPER = - new RowMapper() { - public ServiceEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ServiceEntity s = new ServiceEntity(); - s.id = rs.getString("pk_service"); - s.name = rs.getString("str_name"); - s.minCores = rs.getInt("int_cores_min"); - s.maxCores = rs.getInt("int_cores_max"); - s.minMemory = rs.getLong("int_mem_min"); - s.minGpus = rs.getInt("int_gpus_min"); - s.maxGpus = rs.getInt("int_gpus_max"); - s.minGpuMemory = rs.getLong("int_gpu_mem_min"); - s.threadable = rs.getBoolean("b_threadable"); - s.tags = splitTags(rs.getString("str_tags")); - s.timeout = rs.getInt("int_timeout"); - s.timeout_llu = rs.getInt("int_timeout_llu"); - s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); - return s; + }; + + public static final RowMapper SERVICE_OVERRIDE_MAPPER = + new RowMapper() { + public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.id = rs.getString("pk_show_service"); + s.name = rs.getString("str_name"); + s.minCores = rs.getInt("int_cores_min"); + s.maxCores = rs.getInt("int_cores_max"); + s.minMemory = rs.getLong("int_mem_min"); + s.minGpus = rs.getInt("int_gpus_min"); + s.maxGpus = rs.getInt("int_gpus_max"); + s.minGpuMemory = rs.getLong("int_gpu_mem_min"); + s.threadable = rs.getBoolean("b_threadable"); + s.tags = splitTags(rs.getString("str_tags")); + s.showId = rs.getString("pk_show"); + s.timeout = rs.getInt("int_timeout"); + s.timeout_llu = rs.getInt("int_timeout_llu"); + s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); + return s; } - }; - - public static final RowMapper SERVICE_OVERRIDE_MAPPER = - new RowMapper() { - public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) - throws 
SQLException { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.id = rs.getString("pk_show_service"); - s.name = rs.getString("str_name"); - s.minCores = rs.getInt("int_cores_min"); - s.maxCores = rs.getInt("int_cores_max"); - s.minMemory = rs.getLong("int_mem_min"); - s.minGpus = rs.getInt("int_gpus_min"); - s.maxGpus = rs.getInt("int_gpus_max"); - s.minGpuMemory = rs.getLong("int_gpu_mem_min"); - s.threadable = rs.getBoolean("b_threadable"); - s.tags = splitTags(rs.getString("str_tags")); - s.showId = rs.getString("pk_show"); - s.timeout = rs.getInt("int_timeout"); - s.timeout_llu = rs.getInt("int_timeout_llu"); - s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); - return s; - } - }; - - private static final String QUERY_FOR_SERVICE = - "SELECT " + - "service.pk_service," + - "service.str_name," + - "service.b_threadable," + - "service.int_cores_min," + - "service.int_cores_max," + - "service.int_mem_min," + - "service.int_gpus_min," + - "service.int_gpus_max," + - "service.int_gpu_mem_min," + - "service.str_tags, " + - "service.int_timeout, " + - "service.int_timeout_llu, " + - "service.int_min_memory_increase " + - "FROM " + - "service "; - - @Override - public ServiceEntity get(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_SERVICE + " WHERE (pk_service=? OR str_name=?)", - SERVICE_MAPPER, id, id); - } - - private static final String QUERY_FOR_SERVICE_OVER = - "SELECT " + - "show_service.pk_show_service," + - "show_service.str_name," + - "show_service.b_threadable," + - "show_service.int_cores_min," + - "show_service.int_cores_max, "+ - "show_service.int_mem_min," + - "show_service.int_gpus_min," + - "show_service.int_gpus_max, "+ - "show_service.int_gpu_mem_min," + - "show_service.str_tags," + - "show_service.int_timeout," + - "show_service.int_timeout_llu," + - "show_service.int_min_memory_increase," + - "show.pk_show " + - "FROM " + - "show_service," + - "show " + - "WHERE " + - "show_service.pk_show = show.pk_show "; - - @Override - public ServiceOverrideEntity getOverride(String id, String show) { - return getJdbcTemplate() - .queryForObject( - QUERY_FOR_SERVICE_OVER - + " AND (show_service.pk_show_service=? OR show_service.str_name=?)" - + " AND (show.str_name=? OR show.pk_show=?)", - SERVICE_OVERRIDE_MAPPER, id, id, show, show); - } - - @Override - public ServiceOverrideEntity getOverride(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_SERVICE_OVER + " AND (show_service.pk_show_service=? " + - "OR show_service.str_name=?)", - SERVICE_OVERRIDE_MAPPER, id, id); - } - - @Override - public boolean isOverridden(String service, String show) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM show_service, show WHERE " - + "show_service.pk_show = show.pk_show = ? " - + "AND show_service.str_name=? 
and show.str_name=?", - Integer.class, service, show) > 0; - } - - private static final String INSERT_SERVICE = - "INSERT INTO " + - "service " + - "(" + - "pk_service," + - "str_name," + - "b_threadable," + - "int_cores_min," + - "int_cores_max, "+ - "int_mem_min," + - "int_gpus_min," + - "int_gpus_max, "+ - "int_gpu_mem_min," + - "str_tags," + - "int_timeout," + - "int_timeout_llu, " + - "int_min_memory_increase " + - ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insert(ServiceEntity service) { - service.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SERVICE, service.id, - service.name, service.threadable, service.minCores, - service.maxCores, service.minMemory, - service.minGpus, service.maxGpus, service.minGpuMemory, - StringUtils.join(service.tags.toArray(), " | "), - service.timeout, service.timeout_llu, - service.minMemoryIncrease); - } - - private static final String INSERT_SERVICE_WITH_SHOW = - "INSERT INTO " + - "show_service " + - "(" + - "pk_show_service," + - "pk_show, " + - "str_name," + - "b_threadable," + - "int_cores_min," + - "int_cores_max," + - "int_mem_min," + - "int_gpus_min," + - "int_gpus_max," + - "int_gpu_mem_min," + - "str_tags," + - "int_timeout," + - "int_timeout_llu, " + - "int_min_memory_increase " + - ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insert(ServiceOverrideEntity service) { - service.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SERVICE_WITH_SHOW, service.id, - service.showId, service.name, service.threadable, - service.minCores, service.maxCores, service.minMemory, - service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), - service.timeout, service.timeout_llu, service.minMemoryIncrease); - } - - private static final String UPDATE_SERVICE = - "UPDATE " + - "service " + - "SET " + - "str_name=?," + - "b_threadable=?," + - "int_cores_min=?," + - "int_cores_max=?,"+ - "int_mem_min=?," + - "int_gpus_min=?," + - "int_gpus_max=?," + - "int_gpu_mem_min=?," + - "str_tags=?," + - "int_timeout=?," + - "int_timeout_llu=?, " + - "int_min_memory_increase=? " + - "WHERE " + - "pk_service = ?"; - - @Override - public void update(ServiceEntity service) { - getJdbcTemplate().update(UPDATE_SERVICE, service.name, - service.threadable, service.minCores, service.maxCores, - service.minMemory, service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), - service.timeout, service.timeout_llu, service.minMemoryIncrease, - service.getId()); - } - - private static final String UPDATE_SERVICE_WITH_SHOW = - "UPDATE " + - "show_service " + - "SET " + - "str_name=?," + - "b_threadable=?," + - "int_cores_min=?," + - "int_cores_max=?," + - "int_mem_min=?," + - "int_gpus_min=?," + - "int_gpus_max=?," + - "int_gpu_mem_min=?," + - "str_tags=?," + - "int_timeout=?," + - "int_timeout_llu=?, " + - "int_min_memory_increase=? 
" + - "WHERE " + - "pk_show_service = ?"; - - @Override - public void update(ServiceOverrideEntity service) { - getJdbcTemplate().update(UPDATE_SERVICE_WITH_SHOW, service.name, - service.threadable, service.minCores, service.maxCores, - service.minMemory, service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), - service.timeout, service.timeout_llu, service.minMemoryIncrease, - service.getId()); - } - - @Override - public void delete(ServiceEntity service) { - getJdbcTemplate().update( - "DELETE FROM service WHERE pk_service=?", service.getId()); - } - - @Override - public void delete(ServiceOverrideEntity service) { - getJdbcTemplate().update( - "DELETE FROM show_service WHERE pk_show_service=?", - service.getId()); - } + }; + + private static final String QUERY_FOR_SERVICE = + "SELECT " + "service.pk_service," + "service.str_name," + "service.b_threadable," + + "service.int_cores_min," + "service.int_cores_max," + "service.int_mem_min," + + "service.int_gpus_min," + "service.int_gpus_max," + "service.int_gpu_mem_min," + + "service.str_tags, " + "service.int_timeout, " + "service.int_timeout_llu, " + + "service.int_min_memory_increase " + "FROM " + "service "; + + @Override + public ServiceEntity get(String id) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_SERVICE + " WHERE (pk_service=? OR str_name=?)", SERVICE_MAPPER, id, id); + } + + private static final String QUERY_FOR_SERVICE_OVER = "SELECT " + "show_service.pk_show_service," + + "show_service.str_name," + "show_service.b_threadable," + "show_service.int_cores_min," + + "show_service.int_cores_max, " + "show_service.int_mem_min," + "show_service.int_gpus_min," + + "show_service.int_gpus_max, " + "show_service.int_gpu_mem_min," + "show_service.str_tags," + + "show_service.int_timeout," + "show_service.int_timeout_llu," + + "show_service.int_min_memory_increase," + "show.pk_show " + "FROM " + "show_service," + + "show " + "WHERE " + "show_service.pk_show = show.pk_show "; + + @Override + public ServiceOverrideEntity getOverride(String id, String show) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_SERVICE_OVER + " AND (show_service.pk_show_service=? OR show_service.str_name=?)" + + " AND (show.str_name=? OR show.pk_show=?)", + SERVICE_OVERRIDE_MAPPER, id, id, show, show); + } + + @Override + public ServiceOverrideEntity getOverride(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_SERVICE_OVER + + " AND (show_service.pk_show_service=? " + "OR show_service.str_name=?)", + SERVICE_OVERRIDE_MAPPER, id, id); + } + + @Override + public boolean isOverridden(String service, String show) { + return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM show_service, show WHERE " + + "show_service.pk_show = show.pk_show = ? " + + "AND show_service.str_name=? 
and show.str_name=?", Integer.class, service, show) > 0; + } + + private static final String INSERT_SERVICE = "INSERT INTO " + "service " + "(" + "pk_service," + + "str_name," + "b_threadable," + "int_cores_min," + "int_cores_max, " + "int_mem_min," + + "int_gpus_min," + "int_gpus_max, " + "int_gpu_mem_min," + "str_tags," + "int_timeout," + + "int_timeout_llu, " + "int_min_memory_increase " + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insert(ServiceEntity service) { + service.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SERVICE, service.id, service.name, service.threadable, + service.minCores, service.maxCores, service.minMemory, service.minGpus, service.maxGpus, + service.minGpuMemory, StringUtils.join(service.tags.toArray(), " | "), service.timeout, + service.timeout_llu, service.minMemoryIncrease); + } + + private static final String INSERT_SERVICE_WITH_SHOW = "INSERT INTO " + "show_service " + "(" + + "pk_show_service," + "pk_show, " + "str_name," + "b_threadable," + "int_cores_min," + + "int_cores_max," + "int_mem_min," + "int_gpus_min," + "int_gpus_max," + "int_gpu_mem_min," + + "str_tags," + "int_timeout," + "int_timeout_llu, " + "int_min_memory_increase " + + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insert(ServiceOverrideEntity service) { + service.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SERVICE_WITH_SHOW, service.id, service.showId, service.name, + service.threadable, service.minCores, service.maxCores, service.minMemory, service.minGpus, + service.maxGpus, service.minGpuMemory, joinTags(service.tags), service.timeout, + service.timeout_llu, service.minMemoryIncrease); + } + + private static final String UPDATE_SERVICE = + "UPDATE " + "service " + "SET " + "str_name=?," + "b_threadable=?," + "int_cores_min=?," + + "int_cores_max=?," + "int_mem_min=?," + "int_gpus_min=?," + "int_gpus_max=?," + + "int_gpu_mem_min=?," + "str_tags=?," + "int_timeout=?," + "int_timeout_llu=?, " + + "int_min_memory_increase=? " + "WHERE " + "pk_service = ?"; + + @Override + public void update(ServiceEntity service) { + getJdbcTemplate().update(UPDATE_SERVICE, service.name, service.threadable, service.minCores, + service.maxCores, service.minMemory, service.minGpus, service.maxGpus, service.minGpuMemory, + joinTags(service.tags), service.timeout, service.timeout_llu, service.minMemoryIncrease, + service.getId()); + } + + private static final String UPDATE_SERVICE_WITH_SHOW = + "UPDATE " + "show_service " + "SET " + "str_name=?," + "b_threadable=?," + "int_cores_min=?," + + "int_cores_max=?," + "int_mem_min=?," + "int_gpus_min=?," + "int_gpus_max=?," + + "int_gpu_mem_min=?," + "str_tags=?," + "int_timeout=?," + "int_timeout_llu=?, " + + "int_min_memory_increase=? 
" + "WHERE " + "pk_show_service = ?"; + + @Override + public void update(ServiceOverrideEntity service) { + getJdbcTemplate().update(UPDATE_SERVICE_WITH_SHOW, service.name, service.threadable, + service.minCores, service.maxCores, service.minMemory, service.minGpus, service.maxGpus, + service.minGpuMemory, joinTags(service.tags), service.timeout, service.timeout_llu, + service.minMemoryIncrease, service.getId()); + } + + @Override + public void delete(ServiceEntity service) { + getJdbcTemplate().update("DELETE FROM service WHERE pk_service=?", service.getId()); + } + + @Override + public void delete(ServiceOverrideEntity service) { + getJdbcTemplate().update("DELETE FROM show_service WHERE pk_show_service=?", service.getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java index 86e126559..e8578752b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -40,237 +36,174 @@ import com.imageworks.spcue.util.SqlUtil; public class ShowDaoJdbc extends JdbcDaoSupport implements ShowDao { - @Autowired - private Environment env; - - private static final RowMapper SHOW_MAPPER = - new RowMapper() { - public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ShowEntity show = new ShowEntity(); - show.name = rs.getString("str_name"); - show.id = rs.getString("pk_show"); - show.defaultMaxCores = rs.getInt("int_default_max_cores"); - show.defaultMinCores = rs.getInt("int_default_min_cores"); - show.defaultMaxGpus = rs.getInt("int_default_max_gpus"); - show.defaultMinGpus = rs.getInt("int_default_min_gpus"); - show.active = rs.getBoolean("b_active"); - - if (rs.getString("str_comment_email") != null) { - show.commentMail = rs.getString("str_comment_email").split(","); - } - else { - show.commentMail = new String[0]; - } - return show; - } - }; - - private static final String GET_SHOW = - "SELECT " + - "show.pk_show, " + - "show.int_default_max_cores, " + - "show.int_default_min_cores, " + - "show.int_default_max_gpus, " + - "show.int_default_min_gpus, " + - "show.str_name, " + - "show.b_active, " + - "show.str_comment_email " + - "FROM " + - "show "; - - private static final String GET_SHOW_BY_ALIAS = - "SELECT " + - "show.pk_show, " + - "show.int_default_max_cores, " + - "show.int_default_min_cores, " + - "show.int_default_max_gpus, " + - "show.int_default_min_gpus, " + - "show_alias.str_name, " + - "show.b_active, " + - "show.str_comment_email " + - "FROM " + - "show, " + - "show_alias " + - "WHERE " + - "show.pk_show = show_alias.pk_show " ; - - public ShowEntity findShowDetail(String name) { - try { - return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.str_name=?", - SHOW_MAPPER, name); - } catch (EmptyResultDataAccessException e) { - return getJdbcTemplate().queryForObject(GET_SHOW_BY_ALIAS + "AND show_alias.str_name = ?", - SHOW_MAPPER, name); - } - } - - public ShowEntity getShowDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_SHOW + "WHERE show.pk_show=?", SHOW_MAPPER, id); - } - - private static final String GET_PREFERRED_SHOW = - "SELECT " + - "show.pk_show, " + - "show.int_default_max_cores, " + - "show.int_default_min_cores, " + - "show.int_default_max_gpus, " + - "show.int_default_min_gpus, " + - "show.str_name, " + - "show.b_active, " + - "show.str_comment_email " + - "FROM " + - "show, "+ - "owner,"+ - "deed " + - "WHERE " + - "show.pk_show = owner.pk_show " + - "AND " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "deed.pk_host = ?"; - - public ShowEntity getShowDetail(HostInterface host) { - return getJdbcTemplate().queryForObject( - GET_PREFERRED_SHOW, SHOW_MAPPER, host.getHostId()); - } - - private static final String INSERT_SHOW = - "INSERT INTO show (pk_show,str_name) VALUES (?,?)"; - - private static final String INSERT_SHOW_STATS = - "INSERT INTO show_stats " + - "(pk_show, int_frame_insert_count, int_job_insert_count, int_frame_success_count, int_frame_fail_count) " + - "VALUES (?, 0, 0, 0, 0)"; - - public void insertShow(ShowEntity show) { - show.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SHOW, show.id, show.name); - getJdbcTemplate().update(INSERT_SHOW_STATS, show.id); - } - - private static final String SHOW_EXISTS = - "SELECT " + - "COUNT(show.pk_show) " + - "FROM " + - "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show) " + - "WHERE " + - "(show.str_name = ? 
OR show_alias.str_name = ?) "; - public boolean showExists(String name) { - try { - return getJdbcTemplate().queryForObject(SHOW_EXISTS, - Integer.class, name, name) >= 1; - } catch (DataAccessException e) { - return false; - } - } - - @Override - public void delete(ShowInterface s) { - getJdbcTemplate().update("DELETE FROM point WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM show_alias WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM show_stats WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM show WHERE pk_show=?", - s.getShowId()); - } - - public void updateShowDefaultMinCores(ShowInterface s, int val) { - if (val < 0) { - String msg = "Invalid argument, default min cores " + val + - "must be greater tham 0"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE show SET int_default_min_cores=? WHERE pk_show=?", - val, s.getShowId()); - } - - public void updateShowDefaultMaxCores(ShowInterface s, int val) { - if (val < 0) { - String msg = "Invalid argument, default max cores " + val + - "must be greater tham 0"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE show SET int_default_max_cores=? WHERE pk_show=?", - val, s.getShowId()); - } - - public void updateShowDefaultMinGpus(ShowInterface s, int val) { - getJdbcTemplate().update( - "UPDATE show SET int_default_min_gpus=? WHERE pk_show=?", - val, s.getShowId()); - } - - public void updateShowDefaultMaxGpus(ShowInterface s, int val) { - getJdbcTemplate().update( - "UPDATE show SET int_default_max_gpus=? WHERE pk_show=?", - val, s.getShowId()); - } - - @Override - public void updateBookingEnabled(ShowInterface s, boolean enabled) { - getJdbcTemplate().update( - "UPDATE show SET b_booking_enabled = ? WHERE pk_show=?", - enabled, s.getShowId()); - } - - @Override - public void updateDispatchingEnabled(ShowInterface s, boolean enabled) { - getJdbcTemplate().update( - "UPDATE show SET b_dispatch_enabled = ? WHERE pk_show=?", - enabled, s.getShowId()); - } - - @Override - public void updateActive(ShowInterface s, boolean enabled) { - getJdbcTemplate().update( - "UPDATE show SET b_active= ? WHERE pk_show=?", - enabled, s.getShowId()); - } - - @Override - public void updateShowCommentEmail(ShowInterface s, String[] email) { - getJdbcTemplate().update( - "UPDATE show SET str_comment_email = ? WHERE pk_show=?", - StringUtils.join(email, ","), s.getShowId()); - } - - @Override - public void updateShowsStatus() { - Stream protectedShowsRaw = Arrays - .stream(env.getProperty("protected_shows", String.class, "").split(",")); - String protectedShows = protectedShowsRaw.map(show -> "'" + show + "'") - .collect(Collectors.joining(",")); - int maxShowStaleDays = env.getProperty("max_show_stale_days", Integer.class, -1); - - if (maxShowStaleDays > 0) { - getJdbcTemplate().update("UPDATE show SET b_active=false " + - "WHERE pk_show NOT IN (SELECT pk_show " + - " FROM (SELECT pk_show, count(pk_job) FROM job_history " + - " WHERE " + - " (DATE_PART('days', NOW()) - DATE_PART('days', dt_last_modified)) < ? 
" + - "GROUP BY pk_show HAVING COUNT(pk_job) > 0) pk_show) " + - " AND str_name NOT IN (?)", - maxShowStaleDays, protectedShows); - } - } - - @Override - public void updateFrameCounters(ShowInterface s, int exitStatus) { - String col = "int_frame_success_count = int_frame_success_count + 1"; - if (exitStatus > 0) { - col = "int_frame_fail_count = int_frame_fail_count + 1"; - } - getJdbcTemplate().update( - "UPDATE show_stats SET " + col + " WHERE pk_show=?", s.getShowId()); - } + @Autowired + private Environment env; + + private static final RowMapper SHOW_MAPPER = new RowMapper() { + public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ShowEntity show = new ShowEntity(); + show.name = rs.getString("str_name"); + show.id = rs.getString("pk_show"); + show.defaultMaxCores = rs.getInt("int_default_max_cores"); + show.defaultMinCores = rs.getInt("int_default_min_cores"); + show.defaultMaxGpus = rs.getInt("int_default_max_gpus"); + show.defaultMinGpus = rs.getInt("int_default_min_gpus"); + show.active = rs.getBoolean("b_active"); + + if (rs.getString("str_comment_email") != null) { + show.commentMail = rs.getString("str_comment_email").split(","); + } else { + show.commentMail = new String[0]; + } + return show; + } + }; + + private static final String GET_SHOW = + "SELECT " + "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show.str_name, " + + "show.b_active, " + "show.str_comment_email " + "FROM " + "show "; + + private static final String GET_SHOW_BY_ALIAS = + "SELECT " + "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show_alias.str_name, " + + "show.b_active, " + "show.str_comment_email " + "FROM " + "show, " + "show_alias " + + "WHERE " + "show.pk_show = show_alias.pk_show "; + + public ShowEntity findShowDetail(String name) { + try { + return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.str_name=?", SHOW_MAPPER, + name); + } catch (EmptyResultDataAccessException e) { + return getJdbcTemplate().queryForObject(GET_SHOW_BY_ALIAS + "AND show_alias.str_name = ?", + SHOW_MAPPER, name); + } + } + + public ShowEntity getShowDetail(String id) { + return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.pk_show=?", SHOW_MAPPER, id); + } + + private static final String GET_PREFERRED_SHOW = + "SELECT " + "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show.str_name, " + + "show.b_active, " + "show.str_comment_email " + "FROM " + "show, " + "owner," + "deed " + + "WHERE " + "show.pk_show = owner.pk_show " + "AND " + "deed.pk_owner = owner.pk_owner " + + "AND " + "deed.pk_host = ?"; + + public ShowEntity getShowDetail(HostInterface host) { + return getJdbcTemplate().queryForObject(GET_PREFERRED_SHOW, SHOW_MAPPER, host.getHostId()); + } + + private static final String INSERT_SHOW = "INSERT INTO show (pk_show,str_name) VALUES (?,?)"; + + private static final String INSERT_SHOW_STATS = "INSERT INTO show_stats " + + "(pk_show, int_frame_insert_count, int_job_insert_count, int_frame_success_count, int_frame_fail_count) " + + "VALUES (?, 0, 0, 0, 0)"; + + public void insertShow(ShowEntity show) { + show.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SHOW, show.id, show.name); + getJdbcTemplate().update(INSERT_SHOW_STATS, show.id); + } + + private 
static final String SHOW_EXISTS = "SELECT " + "COUNT(show.pk_show) " + "FROM " + + "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show) " + "WHERE " + + "(show.str_name = ? OR show_alias.str_name = ?) "; + + public boolean showExists(String name) { + try { + return getJdbcTemplate().queryForObject(SHOW_EXISTS, Integer.class, name, name) >= 1; + } catch (DataAccessException e) { + return false; + } + } + + @Override + public void delete(ShowInterface s) { + getJdbcTemplate().update("DELETE FROM point WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show_alias WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show_stats WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show WHERE pk_show=?", s.getShowId()); + } + + public void updateShowDefaultMinCores(ShowInterface s, int val) { + if (val < 0) { + String msg = "Invalid argument, default min cores " + val + "must be greater tham 0"; + throw new IllegalArgumentException(msg); + } + getJdbcTemplate().update("UPDATE show SET int_default_min_cores=? WHERE pk_show=?", val, + s.getShowId()); + } + + public void updateShowDefaultMaxCores(ShowInterface s, int val) { + if (val < 0) { + String msg = "Invalid argument, default max cores " + val + "must be greater tham 0"; + throw new IllegalArgumentException(msg); + } + getJdbcTemplate().update("UPDATE show SET int_default_max_cores=? WHERE pk_show=?", val, + s.getShowId()); + } + + public void updateShowDefaultMinGpus(ShowInterface s, int val) { + getJdbcTemplate().update("UPDATE show SET int_default_min_gpus=? WHERE pk_show=?", val, + s.getShowId()); + } + + public void updateShowDefaultMaxGpus(ShowInterface s, int val) { + getJdbcTemplate().update("UPDATE show SET int_default_max_gpus=? WHERE pk_show=?", val, + s.getShowId()); + } + + @Override + public void updateBookingEnabled(ShowInterface s, boolean enabled) { + getJdbcTemplate().update("UPDATE show SET b_booking_enabled = ? WHERE pk_show=?", enabled, + s.getShowId()); + } + + @Override + public void updateDispatchingEnabled(ShowInterface s, boolean enabled) { + getJdbcTemplate().update("UPDATE show SET b_dispatch_enabled = ? WHERE pk_show=?", enabled, + s.getShowId()); + } + + @Override + public void updateActive(ShowInterface s, boolean enabled) { + getJdbcTemplate().update("UPDATE show SET b_active= ? WHERE pk_show=?", enabled, s.getShowId()); + } + + @Override + public void updateShowCommentEmail(ShowInterface s, String[] email) { + getJdbcTemplate().update("UPDATE show SET str_comment_email = ? WHERE pk_show=?", + StringUtils.join(email, ","), s.getShowId()); + } + + @Override + public void updateShowsStatus() { + Stream protectedShowsRaw = + Arrays.stream(env.getProperty("protected_shows", String.class, "").split(",")); + String protectedShows = + protectedShowsRaw.map(show -> "'" + show + "'").collect(Collectors.joining(",")); + int maxShowStaleDays = env.getProperty("max_show_stale_days", Integer.class, -1); + + if (maxShowStaleDays > 0) { + getJdbcTemplate().update("UPDATE show SET b_active=false " + + "WHERE pk_show NOT IN (SELECT pk_show " + + " FROM (SELECT pk_show, count(pk_job) FROM job_history " + " WHERE " + + " (DATE_PART('days', NOW()) - DATE_PART('days', dt_last_modified)) < ? 
" + + "GROUP BY pk_show HAVING COUNT(pk_job) > 0) pk_show) " + " AND str_name NOT IN (?)", + maxShowStaleDays, protectedShows); + } + } + + @Override + public void updateFrameCounters(ShowInterface s, int exitStatus) { + String col = "int_frame_success_count = int_frame_success_count + 1"; + if (exitStatus > 0) { + col = "int_frame_fail_count = int_frame_fail_count + 1"; + } + getJdbcTemplate().update("UPDATE show_stats SET " + col + " WHERE pk_show=?", s.getShowId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java index a211dea67..08318f8c7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -38,192 +34,129 @@ public class SubscriptionDaoJdbc extends JdbcDaoSupport implements SubscriptionDao { - private static final String IS_SHOW_OVER_SIZE = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores > s.int_size "; - - public boolean isShowOverSize(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, - Integer.class, show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } + private static final String IS_SHOW_OVER_SIZE = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " + + "s.pk_alloc = ? 
" + "AND " + "s.int_cores > s.int_size "; - public boolean isShowOverSize(VirtualProc proc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, - Integer.class, proc.getShowId(), proc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } + public boolean isShowOverSize(ShowInterface show, AllocationInterface alloc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, Integer.class, show.getShowId(), + alloc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return false; } - - private static final String IS_SHOW_AT_OR_OVER_SIZE = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores >= s.int_size "; - - public boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_SIZE, - Integer.class, show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } + } + + public boolean isShowOverSize(VirtualProc proc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, Integer.class, proc.getShowId(), + proc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return false; } - - private static final String IS_SHOW_OVER_BURST = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores + ? > s.int_burst"; - - @Override - public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_BURST, - Integer.class, show.getShowId(), alloc.getAllocationId(), - coreUnits) > 0; - } catch (EmptyResultDataAccessException e) { - return true; - } + } + + private static final String IS_SHOW_AT_OR_OVER_SIZE = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " + + "s.pk_alloc = ? " + "AND " + "s.int_cores >= s.int_size "; + + public boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_SIZE, Integer.class, + show.getShowId(), alloc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return false; } - - private static final String IS_SHOW_AT_OR_OVER_BURST = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores >= s.int_burst"; - - @Override - public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_BURST, - Integer.class, show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return true; - } + } + + private static final String IS_SHOW_OVER_BURST = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " + + "s.pk_alloc = ? " + "AND " + "s.int_cores + ? 
> s.int_burst"; + + @Override + public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_OVER_BURST, Integer.class, show.getShowId(), + alloc.getAllocationId(), coreUnits) > 0; + } catch (EmptyResultDataAccessException e) { + return true; } - - private static final String GET_SUB = - "SELECT " + - "subscription.pk_alloc," + - "subscription.pk_show,"+ - "subscription.int_size,"+ - "subscription.int_burst,"+ - "subscription.pk_subscription,"+ - "(alloc.str_name || '.' || show.str_name) AS str_name " + - "FROM " + - "subscription," + - "alloc," + - "show," + - "facility " + - "WHERE " + - "subscription.pk_show = show.pk_show " + - "AND " + - "subscription.pk_alloc = alloc.pk_alloc " + - "AND " + - "alloc.pk_facility = facility.pk_facility "; - - public static RowMapper SUB_MAPPER = new RowMapper() { - public SubscriptionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - SubscriptionEntity s = new SubscriptionEntity(); - s.allocationId = rs.getString("pk_alloc"); - s.burst = rs.getInt("int_burst"); - s.size = rs.getInt("int_size"); - s.name = rs.getString("str_name"); - s.showId = rs.getString("pk_show"); - s.id = rs.getString("pk_subscription"); - return s; - } - }; - - public SubscriptionEntity getSubscriptionDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_SUB + " AND pk_subscription=?", - SUB_MAPPER, id); + } + + private static final String IS_SHOW_AT_OR_OVER_BURST = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " + + "s.pk_alloc = ? " + "AND " + "s.int_cores >= s.int_burst"; + + @Override + public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_BURST, Integer.class, + show.getShowId(), alloc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return true; } - - private static final String INSERT_SUBSCRIPTION = - "INSERT INTO " + - "subscription " + - "( " + - "pk_subscription, pk_alloc, pk_show, int_size, int_burst"+ - ") " + - "VALUES (?,?,?,?,?)"; - - public void insertSubscription(SubscriptionEntity detail) { - detail.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SUBSCRIPTION, - detail.id, detail.allocationId, detail.showId, detail.size, detail.burst); - } - private static final String HAS_RUNNING_PROCS = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_subscription=? " + - "AND " + - "s.int_cores > 0 "; - - public boolean hasRunningProcs(SubscriptionInterface sub) { - try { - return getJdbcTemplate().queryForObject(HAS_RUNNING_PROCS, - Integer.class, sub.getSubscriptionId()) > 0; - } catch (DataAccessException e) { - return false; - } - } - - public void deleteSubscription(SubscriptionInterface sub) { - if (hasRunningProcs(sub)) { - throw new EntityModificationError("You cannot delete a subscription with running procs"); - } - getJdbcTemplate().update( - "DELETE FROM subscription WHERE pk_subscription=?", - sub.getSubscriptionId()); + } + + private static final String GET_SUB = "SELECT " + "subscription.pk_alloc," + + "subscription.pk_show," + "subscription.int_size," + "subscription.int_burst," + + "subscription.pk_subscription," + "(alloc.str_name || '.' 
|| show.str_name) AS str_name " + + "FROM " + "subscription," + "alloc," + "show," + "facility " + "WHERE " + + "subscription.pk_show = show.pk_show " + "AND " + "subscription.pk_alloc = alloc.pk_alloc " + + "AND " + "alloc.pk_facility = facility.pk_facility "; + + public static RowMapper SUB_MAPPER = new RowMapper() { + public SubscriptionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + SubscriptionEntity s = new SubscriptionEntity(); + s.allocationId = rs.getString("pk_alloc"); + s.burst = rs.getInt("int_burst"); + s.size = rs.getInt("int_size"); + s.name = rs.getString("str_name"); + s.showId = rs.getString("pk_show"); + s.id = rs.getString("pk_subscription"); + return s; } - - public void updateSubscriptionSize(SubscriptionInterface sub, int size) { - getJdbcTemplate().update( - "UPDATE subscription SET int_size=? WHERE pk_subscription=?", - size, sub.getSubscriptionId()); + }; + + public SubscriptionEntity getSubscriptionDetail(String id) { + return getJdbcTemplate().queryForObject(GET_SUB + " AND pk_subscription=?", SUB_MAPPER, id); + } + + private static final String INSERT_SUBSCRIPTION = "INSERT INTO " + "subscription " + "( " + + "pk_subscription, pk_alloc, pk_show, int_size, int_burst" + ") " + "VALUES (?,?,?,?,?)"; + + public void insertSubscription(SubscriptionEntity detail) { + detail.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SUBSCRIPTION, detail.id, detail.allocationId, detail.showId, + detail.size, detail.burst); + } + + private static final String HAS_RUNNING_PROCS = "SELECT " + "COUNT(1) " + "FROM " + + "subscription s " + "WHERE " + "s.pk_subscription=? " + "AND " + "s.int_cores > 0 "; + + public boolean hasRunningProcs(SubscriptionInterface sub) { + try { + return getJdbcTemplate().queryForObject(HAS_RUNNING_PROCS, Integer.class, + sub.getSubscriptionId()) > 0; + } catch (DataAccessException e) { + return false; } + } - public void updateSubscriptionBurst(SubscriptionInterface sub, int size) { - getJdbcTemplate().update( - "UPDATE subscription SET int_burst=? WHERE pk_subscription=?", - size, sub.getSubscriptionId()); + public void deleteSubscription(SubscriptionInterface sub) { + if (hasRunningProcs(sub)) { + throw new EntityModificationError("You cannot delete a subscription with running procs"); } + getJdbcTemplate().update("DELETE FROM subscription WHERE pk_subscription=?", + sub.getSubscriptionId()); + } + + public void updateSubscriptionSize(SubscriptionInterface sub, int size) { + getJdbcTemplate().update("UPDATE subscription SET int_size=? WHERE pk_subscription=?", size, + sub.getSubscriptionId()); + } + + public void updateSubscriptionBurst(SubscriptionInterface sub, int size) { + getJdbcTemplate().update("UPDATE subscription SET int_burst=? WHERE pk_subscription=?", size, + sub.getSubscriptionId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java index 8f2b96068..330151240 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -41,219 +37,160 @@ */ public class TaskDaoJdbc extends JdbcDaoSupport implements TaskDao { - @Override - public void deleteTasks(PointInterface cdept) { - getJdbcTemplate().update( - "DELETE FROM task WHERE pk_point=?", - cdept.getPointId()); + @Override + public void deleteTasks(PointInterface cdept) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_point=?", cdept.getPointId()); + } + + @Override + public void deleteTasks(ShowInterface show, DepartmentInterface dept) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_show=? AND pk_dept=?", show.getShowId(), + dept.getDepartmentId()); + } + + @Override + public void deleteTask(TaskInterface task) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_task=?", task.getId()); + } + + @Override + public boolean isManaged(TaskInterface t) { + try { + return getJdbcTemplate().queryForObject( + "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", Integer.class, t.getShowId(), + t.getDepartmentId()) == 1; + } catch (org.springframework.dao.DataRetrievalFailureException e) { + return false; } - - @Override - public void deleteTasks(ShowInterface show, DepartmentInterface dept) { - getJdbcTemplate().update( - "DELETE FROM task WHERE pk_show=? AND pk_dept=?", - show.getShowId(), dept.getDepartmentId()); + } + + private static final String INSERT_TASK = "INSERT INTO " + "task " + "( " + "pk_task," + + "pk_point," + "str_shot," + "int_min_cores" + ") " + "VALUES (?,?,?,?)"; + + @Override + public void insertTask(TaskEntity task) { + task.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_TASK, task.id, task.getPointId(), task.shot, task.minCoreUnits); + } + + private static final String GET_TASK_DETAIL = + "SELECT " + "point.pk_dept," + "point.pk_show," + "point.pk_point," + "task.pk_task," + + "task.int_min_cores + task.int_adjust_cores AS int_min_cores," + "task.str_shot," + + "(task.str_shot || '.' 
|| dept.str_name) AS str_name " + "FROM " + "point," + "task," + + "dept, " + "show " + "WHERE " + "point.pk_dept = dept.pk_dept " + "AND " + + "point.pk_show = show.pk_show " + "AND " + "point.pk_point = task.pk_point "; + + public static final RowMapper TASK_DETAIL_MAPPER = new RowMapper() { + public TaskEntity mapRow(ResultSet rs, int row) throws SQLException { + TaskEntity t = new TaskEntity(); + t.pointId = rs.getString("pk_point"); + t.deptId = rs.getString("pk_dept"); + t.showId = rs.getString("pk_show"); + t.id = rs.getString("pk_task"); + t.minCoreUnits = rs.getInt("int_min_cores"); + t.name = rs.getString("str_name"); + t.shot = rs.getString("str_shot"); + return t; } - - @Override - public void deleteTask(TaskInterface task) { - getJdbcTemplate().update( - "DELETE FROM task WHERE pk_task=?", - task.getId()); + }; + + @Override + public TaskEntity getTaskDetail(String id) { + return getJdbcTemplate().queryForObject(GET_TASK_DETAIL + " AND task.pk_task=?", + TASK_DETAIL_MAPPER, id); + } + + @Override + public TaskEntity getTaskDetail(DepartmentInterface d, String shot) { + return getJdbcTemplate().queryForObject( + GET_TASK_DETAIL + " AND point.pk_dept = ? AND task.str_shot = ?", TASK_DETAIL_MAPPER, + d.getDepartmentId(), shot); + } + + @Override + public TaskEntity getTaskDetail(JobInterface j) { + Map map = getJdbcTemplate() + .queryForMap("SELECT pk_dept, str_shot FROM job WHERE job.pk_job=?", j.getJobId()); + + return getJdbcTemplate().queryForObject( + GET_TASK_DETAIL + " AND task.str_shot = ? AND point.pk_dept = ?", TASK_DETAIL_MAPPER, + map.get("str_shot").toString(), map.get("pk_dept").toString()); + } + + public void updateTaskMinCores(TaskInterface t, int value) { + if (value < 0) { + throw new IllegalArgumentException("min cores must be greater than or equal to 0"); } - - @Override - public boolean isManaged(TaskInterface t) { - try { - return getJdbcTemplate().queryForObject("SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", - Integer.class, t.getShowId(), t.getDepartmentId()) == 1; - } catch (org.springframework.dao.DataRetrievalFailureException e) { - return false; - } + getJdbcTemplate().update("UPDATE task SET int_min_cores=? WHERE pk_task=?", value, + t.getTaskId()); + } + + @Override + public void adjustTaskMinCores(TaskInterface t, int value) { + if (value < 0) { + throw new IllegalArgumentException("min cores must be greater than or equal to 0"); } - - private static final String INSERT_TASK = - "INSERT INTO " + - "task " + - "( " + - "pk_task,"+ - "pk_point,"+ - "str_shot," + - "int_min_cores" + - ") " + - "VALUES (?,?,?,?)"; - - @Override - public void insertTask(TaskEntity task) { - task.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_TASK, - task.id, task.getPointId(), task.shot, task.minCoreUnits); + getJdbcTemplate().update("UPDATE task SET int_adjust_cores = ? - int_min_cores WHERE pk_task=?", + value, t.getTaskId()); + } + + @Override + public void mergeTask(TaskEntity t) { + String pkTask = null; + try { + pkTask = + getJdbcTemplate() + .queryForObject( + "SELECT task.pk_task FROM task, point WHERE task.pk_point = point.pk_point AND " + + "task.str_shot = ? 
AND point.pk_point=?", + String.class, t.shot, t.getPointId()); + + } catch (EmptyResultDataAccessException dae) { + // Eat this, its possible that no task exists } - private static final String GET_TASK_DETAIL = - "SELECT " + - "point.pk_dept,"+ - "point.pk_show,"+ - "point.pk_point,"+ - "task.pk_task," + - "task.int_min_cores + task.int_adjust_cores AS int_min_cores,"+ - "task.str_shot,"+ - "(task.str_shot || '.' || dept.str_name) AS str_name " + - "FROM " + - "point,"+ - "task,"+ - "dept, "+ - "show "+ - "WHERE " + - "point.pk_dept = dept.pk_dept "+ - "AND " + - "point.pk_show = show.pk_show " + - "AND " + - "point.pk_point = task.pk_point "; - - public static final RowMapper TASK_DETAIL_MAPPER = - new RowMapper() { - public TaskEntity mapRow(ResultSet rs, int row) throws SQLException { - TaskEntity t = new TaskEntity(); - t.pointId = rs.getString("pk_point"); - t.deptId = rs.getString("pk_dept"); - t.showId = rs.getString("pk_show"); - t.id = rs.getString("pk_task"); - t.minCoreUnits = rs.getInt("int_min_cores"); - t.name = rs.getString("str_name"); - t.shot = rs.getString("str_shot"); - return t; - } - }; - - @Override - public TaskEntity getTaskDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND task.pk_task=?", - TASK_DETAIL_MAPPER, id); + // No need to do anything with this task. + if (pkTask == null && t.minCoreUnits == 0) { + return; } - @Override - public TaskEntity getTaskDetail(DepartmentInterface d, String shot) { - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND point.pk_dept = ? AND task.str_shot = ?", - TASK_DETAIL_MAPPER, d.getDepartmentId(), shot); + if (t.minCoreUnits == 0) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_point=? AND str_shot=? ", t.getPointId(), + t.shot); + } else if (getJdbcTemplate().update( + "UPDATE task SET int_min_cores=? WHERE pk_point=? AND str_shot=?", t.minCoreUnits, + t.getPointId(), t.shot) == 0) { + try { + insertTask(t); + } catch (org.springframework.dao.DataIntegrityViolationException e) { + logger.warn("error inserting task " + t.shot + "," + e); + } } + } - @Override - public TaskEntity getTaskDetail(JobInterface j) { - Map map = getJdbcTemplate().queryForMap( - "SELECT pk_dept, str_shot FROM job WHERE job.pk_job=?", j.getJobId()); - - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND task.str_shot = ? AND point.pk_dept = ?", - TASK_DETAIL_MAPPER, map.get("str_shot").toString(), map.get("pk_dept").toString()); - } + private static final String CLEAR_TASK_ADJUSTMENTS = "UPDATE " + "task " + "SET " + + "int_adjust_cores = 0 " + "WHERE " + "pk_show=? " + "AND " + "pk_dept = ? "; - public void updateTaskMinCores(TaskInterface t, int value) { - if (value < 0) { - throw new IllegalArgumentException("min cores must be greater than or equal to 0"); - } - getJdbcTemplate().update( - "UPDATE task SET int_min_cores=? WHERE pk_task=?", - value, t.getTaskId()); - } + @Override + public void clearTaskAdjustments(PointInterface cdept) { + getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENTS, cdept.getShowId(), cdept.getDepartmentId()); + } - @Override - public void adjustTaskMinCores(TaskInterface t, int value) { - if (value < 0) { - throw new IllegalArgumentException("min cores must be greater than or equal to 0"); - } - getJdbcTemplate().update( - "UPDATE task SET int_adjust_cores = ? 
- int_min_cores WHERE pk_task=?", - value, t.getTaskId()); - } + private static final String CLEAR_TASK_ADJUSTMENT = + "UPDATE " + "task " + "SET " + "int_adjust_cores = 0 " + "WHERE " + "pk_task=?"; - @Override - public void mergeTask(TaskEntity t) { - String pkTask = null; - try { - pkTask = getJdbcTemplate().queryForObject( - "SELECT task.pk_task FROM task, point WHERE task.pk_point = point.pk_point AND " + - "task.str_shot = ? AND point.pk_point=?", String.class, - t.shot, t.getPointId()); + @Override + public void clearTaskAdjustment(TaskInterface t) { + getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENT, t.getTaskId()); + } - } catch (EmptyResultDataAccessException dae) { - // Eat this, its possible that no task exists - } + private static final String IS_JOB_MANAGED = "SELECT " + "COUNT(1) " + "FROM " + "job," + "task," + + "point " + "WHERE " + "job.pk_show = point.pk_show " + "AND " + + "job.pk_dept = point.pk_dept " + "AND " + "task.pk_point = point.pk_point " + "AND " + + "task.str_shot = job.str_shot " + "AND " + "job.pk_job = ?"; - // No need to do anything with this task. - if (pkTask == null && t.minCoreUnits == 0) { - return; - } - - if (t.minCoreUnits == 0) { - getJdbcTemplate().update("DELETE FROM task WHERE pk_point=? AND str_shot=? ", - t.getPointId(), t.shot); - } - else if (getJdbcTemplate().update( - "UPDATE task SET int_min_cores=? WHERE pk_point=? AND str_shot=?", - t.minCoreUnits, t.getPointId(), t.shot) == 0) { - try { - insertTask(t); - } - catch (org.springframework.dao.DataIntegrityViolationException e) { - logger.warn("error inserting task " + t.shot + "," + e); - } - } - } - - private static final String CLEAR_TASK_ADJUSTMENTS = - "UPDATE " + - "task " + - "SET " + - "int_adjust_cores = 0 " + - "WHERE " + - "pk_show=? " + - "AND " + - "pk_dept = ? "; - - @Override - public void clearTaskAdjustments(PointInterface cdept) { - getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENTS, - cdept.getShowId(), cdept.getDepartmentId()); - } - - private static final String CLEAR_TASK_ADJUSTMENT = - "UPDATE " + - "task " + - "SET " + - "int_adjust_cores = 0 " + - "WHERE " + - "pk_task=?"; - - @Override - public void clearTaskAdjustment(TaskInterface t) { - getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENT, t.getTaskId()); - } - - private static final String IS_JOB_MANAGED = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job,"+ - "task,"+ - "point " + - "WHERE " + - "job.pk_show = point.pk_show " + - "AND " + - "job.pk_dept = point.pk_dept " + - "AND " + - "task.pk_point = point.pk_point " + - "AND " + - "task.str_shot = job.str_shot " + - "AND " + - "job.pk_job = ?"; - - @Override - public boolean isManaged(JobInterface j) { - return getJdbcTemplate().queryForObject(IS_JOB_MANAGED, - Integer.class, j.getJobId()) > 0; - } + @Override + public boolean isManaged(JobInterface j) { + return getJdbcTemplate().queryForObject(IS_JOB_MANAGED, Integer.class, j.getJobId()) > 0; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java index 1bc6bed59..31338b5a0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dao.postgres; import java.sql.ResultSet; @@ -130,2226 +126,1557 @@ import com.imageworks.spcue.util.SqlUtil; public class WhiteboardDaoJdbc extends JdbcDaoSupport implements WhiteboardDao { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(WhiteboardDaoJdbc.class); - - private FrameSearchFactory frameSearchFactory; - private ProcSearchFactory procSearchFactory; - - @Override - public Service getService(String id) { - return getJdbcTemplate().queryForObject( - GET_SERVICE + " WHERE (pk_service=? or str_name=?)", - SERVICE_MAPPER, id, id); - } - - @Override - public Service findService(String name) { - return getJdbcTemplate().queryForObject( - GET_SERVICE + " WHERE service.str_name=?", - SERVICE_MAPPER, name); - } - @Override - public ServiceSeq getDefaultServices() { - List services = getJdbcTemplate().query(GET_SERVICE, SERVICE_MAPPER); - return ServiceSeq.newBuilder().addAllServices(services).build(); - } - - @Override - public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { - return ServiceOverrideSeq.newBuilder().addAllServiceOverrides(getJdbcTemplate().query( - GET_SERVICE_OVERRIDE + " AND show_service.pk_show = ?", - SERVICE_OVERRIDE_MAPPER, show.getId())).build(); - } - - @Override - public ServiceOverride getServiceOverride(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject ( - GET_SERVICE_OVERRIDE + - " AND show_service.pk_show=? AND (show_service.str_name=? OR" + - " show_service.pk_show_service=?)", - SERVICE_OVERRIDE_MAPPER, show.getId(), name, name); - } - - @Override - public Filter getFilter(FilterInterface filter) { - return getJdbcTemplate().queryForObject(GET_FILTER + " AND pk_filter=?", - FILTER_MAPPER, filter.getFilterId()); - } - - @Override - public Filter findFilter(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " AND filter.pk_show=? AND filter.str_name=?", - FILTER_MAPPER, show.getShowId(), name); - } - - @Override - public Filter findFilter(String show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " AND show.str_name=? AND filter.str_name=?", - FILTER_MAPPER, show, name); - } - - @Override - public FilterSeq getFilters(ShowInterface show) { - return FilterSeq.newBuilder().addAllFilters(getJdbcTemplate().query( - GET_FILTER + " AND show.pk_show=? 
ORDER BY f_order ASC", - FILTER_MAPPER, show.getShowId())).build(); - } - - @Override - public ActionSeq getActions(FilterInterface filter) { - return ActionSeq.newBuilder().addAllActions(getJdbcTemplate().query( - GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC ", - ACTION_MAPPER, filter.getFilterId())).build(); - } - - @Override - public MatcherSeq getMatchers(FilterInterface filter) { - return MatcherSeq.newBuilder().addAllMatchers(getJdbcTemplate().query( - GET_MATCHER + " AND filter.pk_filter=? ORDER BY ts_created ASC", - MATCHER_MAPPER, filter.getFilterId())).build(); - } - - @Override - public Action getAction(ActionInterface action) { - return getJdbcTemplate().queryForObject( - GET_ACTION + " AND action.pk_action=?", - ACTION_MAPPER, action.getActionId()); - } - - @Override - public Matcher getMatcher(MatcherInterface matcher) { - return getJdbcTemplate().queryForObject( - GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_MAPPER, matcher.getMatcherId()); - } - - @Override - public Show getShow(String id) { - return getJdbcTemplate().queryForObject( - GET_SHOW + " AND show.pk_show=?", - SHOW_MAPPER, id); - } - - @Override - public ShowSeq getShows() { - List shows = getJdbcTemplate().query(GET_SHOW, SHOW_MAPPER); - return ShowSeq.newBuilder().addAllShows(shows).build(); - } - - @Override - public ShowSeq getActiveShows() { - List shows = getJdbcTemplate().query(GET_SHOW + " AND b_active=?", - SHOW_MAPPER, true); - return ShowSeq.newBuilder().addAllShows(shows).build(); - } - - @Override - public Show findShow(String name) { - return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.str_name=?", - SHOW_MAPPER, name); - } - - @Override - public Subscription getSubscription(String id) { - return getJdbcTemplate().queryForObject( - GET_SUBSCRIPTION + " AND subscription.pk_subscription=?", - SUBSCRIPTION_MAPPER, id); - } - - @Override - public Subscription findSubscription(String show, String alloc) { - return getJdbcTemplate().queryForObject( - GET_SUBSCRIPTION + - " AND show.str_name=? 
AND alloc.str_name=?", - SUBSCRIPTION_MAPPER, show, alloc); - } - - @Override - public SubscriptionSeq getSubscriptions(ShowInterface show) { - List subscriptions = getJdbcTemplate().query( - GET_SUBSCRIPTION + " AND show.pk_show=?", - SUBSCRIPTION_MAPPER, show.getShowId()); - return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); - } - - @Override - public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { - List subscriptions = getJdbcTemplate().query( - GET_SUBSCRIPTION + " AND subscription.pk_alloc=?", - SUBSCRIPTION_MAPPER, alloc.getAllocationId()); - return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); - } - - @Override - public Allocation findAllocation(String name) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.str_name=?", - ALLOCATION_MAPPER, name); - } - - @Override - public Allocation getAllocation(String id) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.pk_alloc=?", - ALLOCATION_MAPPER, id); - } - - @Override - public AllocationSeq getAllocations() { - return AllocationSeq.newBuilder().addAllAllocations(getJdbcTemplate().query( - GET_ALLOCATION + " ORDER BY alloc.str_name ", - ALLOCATION_MAPPER)).build(); - } - - @Override - public AllocationSeq getAllocations( - com.imageworks.spcue.FacilityInterface facility) { - return AllocationSeq.newBuilder().addAllAllocations(getJdbcTemplate().query( - GET_ALLOCATION + " AND alloc.pk_facility = ?", - ALLOCATION_MAPPER, facility.getFacilityId())).build(); - } - - @Override - public JobSeq getJobs(GroupInterface group) { - List jobs = getJdbcTemplate().query( - GET_PENDING_JOBS + " AND job.pk_folder=? ORDER BY job.str_name ASC", - JOB_MAPPER, group.getId()); - return JobSeq.newBuilder().addAllJobs(jobs).build(); - } - - @Override - public List getJobNames(JobSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_JOB_NAMES), - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString(1); - } - }, r.getValuesArray()); - } - - @Override - public JobSeq getJobs(JobSearchInterface r) { - List jobs = getJdbcTemplate().query( - r.getFilteredQuery(GET_JOB) + "ORDER BY job.str_name ASC", JOB_MAPPER, r.getValuesArray()); - return JobSeq.newBuilder().addAllJobs(jobs).build(); - } - - @Override - public Job findJob(String name) { - return getJdbcTemplate().queryForObject( - GET_PENDING_JOBS + " AND job.str_name=?", - JOB_MAPPER, name.toLowerCase()); - } - - @Override - public Job getJob(String id) { - return getJdbcTemplate().queryForObject( - GET_JOB + " AND job.pk_job=?", - JOB_MAPPER, id); - } - - @Override - public Layer getLayer(String id) { - return getJdbcTemplate().queryForObject( - GET_LAYER_WITH_LIMITS + " WHERE layer.pk_layer=?", - LAYER_MAPPER, id); - } - - @Override - public Layer findLayer(String job, String layer) { - return getJdbcTemplate().queryForObject( - GET_LAYER_WITH_LIMITS + " WHERE job.str_state='PENDING' AND job.str_name=? AND layer.str_name=?", - LAYER_MAPPER, job, layer); - } - - @Override - public LayerSeq getLayers(JobInterface job) { - String query = GET_LAYER_WITH_LIMITS + " WHERE layer.pk_job=? 
ORDER BY layer.int_dispatch_order ASC"; - List layers = getJdbcTemplate().query( - query, LAYER_MAPPER, job.getJobId()); - return LayerSeq.newBuilder().addAllLayers(layers).build(); - } - - public Layer addLimitNames(Layer layer) { - return layer.toBuilder().addAllLimits(getLimitNames(layer.getId())).build(); - } - - public List getLimitNames(String layerId) { - return getJdbcTemplate().query(GET_LIMIT_NAMES, - LIMIT_NAME_MAPPER, layerId); - } - - @Override - public List getLimits(LayerInterface layer) { - List limits = getJdbcTemplate().query( - GET_LIMIT_FROM_LAYER_ID, LIMIT_MAPPER, layer.getLayerId()); - return limits; - } - - @Override - public GroupSeq getGroups(ShowInterface show) { - List groups = getJdbcTemplate().query( - GET_GROUPS + " AND folder.pk_show=? ORDER BY folder_level.int_level ASC, folder.str_name ASC ", - GROUP_MAPPER, show.getShowId()); - return GroupSeq.newBuilder().addAllGroups(groups).build(); - } - - @Override - public GroupSeq getGroups(GroupInterface group) { - List groups = getJdbcTemplate().query( - GET_GROUPS + " AND folder.pk_parent_folder=? ORDER BY folder_level.int_level ASC, folder.f_order DESC, folder.str_name ASC ", - GROUP_MAPPER, group.getGroupId()); - return GroupSeq.newBuilder().addAllGroups(groups).build(); - } - - @Override - public Group getGroup(String id) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND folder.pk_folder=?", - GROUP_MAPPER, id); - } - - @Override - public Group getRootGroup(ShowInterface show) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND show.pk_show=? AND folder.b_default=?", - GROUP_MAPPER, show.getShowId(), true); - } - - @Override - public Frame findFrame(String job, String layer, int frame) { - return getJdbcTemplate().queryForObject(FIND_FRAME, FRAME_MAPPER, job, layer, frame); - } - - @Override - public Frame getFrame(String id) { - return getJdbcTemplate().queryForObject( - GET_FRAME + " AND frame.pk_frame=?", FRAME_MAPPER, id); - } - - @Override - public FrameSeq getFrames(FrameSearchInterface r) { - List frames = getJdbcTemplate().query( - r.getSortedQuery(GET_FRAMES_CRITERIA), FRAME_MAPPER, r.getValuesArray()); - return FrameSeq.newBuilder().addAllFrames(frames).build(); - } - - @Override - public Depend getDepend(DependInterface depend) { - return getJdbcTemplate().queryForObject( - GET_DEPEND + " WHERE pk_depend=?",DEPEND_MAPPER, depend.getId()); - } - - @Override - public Depend getDepend(com.imageworks.spcue.depend.AbstractDepend depend) { - return getJdbcTemplate().queryForObject( - GET_DEPEND + " WHERE pk_depend=?",DEPEND_MAPPER, depend.getId()); - } - - @Override - public DependSeq getWhatDependsOnThis(JobInterface job) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_job_depend_on=?", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatDependsOnThis(LayerInterface layer) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_on=?", - DEPEND_MAPPER, layer.getLayerId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - - } - - @Override - public DependSeq getWhatDependsOnThis(FrameInterface frame) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_frame_depend_on=?", - DEPEND_MAPPER, frame.getFrameId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(JobInterface job) { - List depends 
= getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er IS NULL AND " + - "pk_frame_depend_er IS NULL AND pk_job_depend_er=?", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(LayerInterface layer) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er=?", - DEPEND_MAPPER, layer.getLayerId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(FrameInterface frame) { - /* - * This should show anything that is making the frame dependent. - */ - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE " + - "(pk_job_depend_er=? AND str_type IN ('JOB_ON_JOB','JOB_ON_LAYER','JOB_ON_FRAME')) OR " + - "(pk_layer_depend_er=? AND str_type IN ('LAYER_ON_JOB','LAYER_ON_LAYER','LAYER_ON_FRAME')) " + - "OR (pk_frame_depend_er=?)", - DEPEND_MAPPER, frame.getJobId(), frame.getLayerId(), frame.getFrameId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getDepends(JobInterface job) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_job_depend_er=? AND str_type != 'FRAME_ON_FRAME'", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public Depend getDepend(String id) { - return getJdbcTemplate().queryForObject( - GET_DEPEND + " WHERE pk_depend=?",DEPEND_MAPPER,id); - } - - @Override - public Group findGroup(String show, String group) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND show.str_name=? AND folder.str_name=?", - GROUP_MAPPER, show, group); - } - - @Override - public Host findHost(String name) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND host.str_name=?", HOST_MAPPER, name); - } - - @Override - public HostSeq getHosts(HostSearchInterface r) { - List hosts = getJdbcTemplate().query( - r.getFilteredQuery(GET_HOST), HOST_MAPPER, r.getValuesArray()); - return HostSeq.newBuilder().addAllHosts(hosts).build(); - } - - @Override - public Host getHost(String id) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); - } - - @Override - public ProcSeq getProcs(HostInterface host) { - ProcSearchInterface r = procSearchFactory.create(); - r.filterByHost(host); - r.sortByHostName(); - r.sortByDispatchedTime(); - return ProcSeq.newBuilder().addAllProcs(getProcs(r).getProcsList()).build(); - } - - @Override - public ProcSeq getProcs(ProcSearchInterface p) { - p.sortByHostName(); - p.sortByDispatchedTime(); - List procs = getJdbcTemplate().query(p.getFilteredQuery(GET_PROC), - PROC_MAPPER, p.getValuesArray()); - return ProcSeq.newBuilder().addAllProcs(procs).build(); - } - - @Override - public CommentSeq getComments(HostInterface h) { - List comments = getJdbcTemplate().query( - GET_HOST_COMMENTS, COMMENT_MAPPER, h.getHostId()); - return CommentSeq.newBuilder().addAllComments(comments).build(); - } - - @Override - public CommentSeq getComments(JobInterface j) { - List comments = getJdbcTemplate().query( - GET_JOB_COMMENTS, COMMENT_MAPPER, j.getJobId()); - return CommentSeq.newBuilder().addAllComments(comments).build(); - } - - @Override - public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, - List layers, int epochSeconds) { - - long timeDiff = (System.currentTimeMillis() / 1000) - epochSeconds; - if (timeDiff > 
60) { - throw new IllegalArgumentException("the last update timestamp cannot be over " + - "a minute off the current time, difference was: " + timeDiff); - } - - UpdatedFrameCheckResult.Builder resultBuilder = UpdatedFrameCheckResult.newBuilder(); - resultBuilder.setState(JobState.valueOf(getJdbcTemplate().queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId()))); - - FrameSearchInterface r = frameSearchFactory.create(job); - r.filterByLayers(layers); - r.filterByChangeDate(epochSeconds); - r.setMaxResults(100); - - List updatedFrameList = getJdbcTemplate().query( - r.getFilteredQuery(GET_UPDATED_FRAME), UPDATED_FRAME_MAPPER, r.getValuesArray()); - resultBuilder.setUpdatedFrames(UpdatedFrameSeq.newBuilder().addAllUpdatedFrames(updatedFrameList).build()); - resultBuilder.setServerTime((int) (System.currentTimeMillis() / 1000) - 1); - - return resultBuilder.build(); - } - - @Override - public Department getDepartment(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_DEPARTMENT, DEPARTMENT_MAPPER, - show.getShowId(), name); - } - - @Override - public DepartmentSeq getDepartments (ShowInterface show) { - List departments = getJdbcTemplate().query( - GET_DEPARTMENTS, DEPARTMENT_MAPPER, - show.getShowId()); - return DepartmentSeq.newBuilder().addAllDepartments(departments).build(); - } - - @Override - public List getDepartmentNames() { - return getJdbcTemplate().query("SELECT str_name FROM dept ORDER BY str_name ASC", - new RowMapper() { - public String mapRow(ResultSet rs, int row) throws SQLException { - return rs.getString("str_name"); - } - }); - } - - @Override - public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { - return getJdbcTemplate().queryForObject( - GET_TASK + " AND point.pk_show=? AND point.pk_dept=? AND task.str_shot=?", - TASK_MAPPER, show.getShowId(), dept.getDepartmentId(), shot); - } - - @Override - public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { - if (dept == null) { - return TaskSeq.newBuilder().addAllTasks(getJdbcTemplate().query( - GET_TASK + " AND point.pk_show=? ORDER BY task.str_shot", - TASK_MAPPER, show.getShowId())).build(); - } else { - return TaskSeq.newBuilder().addAllTasks(getJdbcTemplate().query( - GET_TASK + " AND point.pk_show=? AND point.pk_dept=? 
ORDER BY task.str_shot", - TASK_MAPPER, show.getShowId(), dept.getDepartmentId())).build(); - } - } - - @Override - public DeedSeq getDeeds(OwnerEntity owner) { - List deeds = getJdbcTemplate().query( - QUERY_FOR_DEED + " AND owner.pk_owner=?", - DEED_MAPPER, owner.getId()); - return DeedSeq.newBuilder().addAllDeeds(deeds).build(); - } - - @Override - public DeedSeq getDeeds(ShowInterface show) { - List deeds = getJdbcTemplate().query( - QUERY_FOR_DEED + " AND show.pk_show=?", - DEED_MAPPER, show.getId()); - return DeedSeq.newBuilder().addAllDeeds(deeds).build(); - } - - @Override - public Host getHost(DeedEntity deed) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND host.pk_host=?", - HOST_MAPPER, deed.id); - } - - @Override - public Deed getDeed(HostInterface host) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_DEED + " AND host.pk_host=?", - DEED_MAPPER, host.getHostId()); - } - - @Override - public HostSeq getHosts(OwnerEntity owner) { - StringBuilder sb = new StringBuilder(4096); - String query = GET_HOST; - query = query.replace("FROM " , "FROM owner, deed,"); - sb.append(query); - sb.append("AND deed.pk_host = host.pk_host "); - sb.append("AND deed.pk_owner = owner.pk_owner "); - sb.append("AND owner.pk_owner = ?"); - - List hosts = getJdbcTemplate().query( - sb.toString(), HOST_MAPPER, owner.getId()); - return HostSeq.newBuilder().addAllHosts(hosts).build(); - } - - @Override - public Owner getOwner(DeedEntity deed) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " AND " + - "pk_owner = (SELECT deed.pk_owner FROM deed " + - "WHERE pk_deed=?)", OWNER_MAPPER, deed.getId()); - } - - @Override - public Owner getOwner(HostInterface host) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " AND " + - "pk_owner = (SELECT deed.pk_owner FROM deed " + - "WHERE pk_host=?)", OWNER_MAPPER, host.getHostId()); - } - - @Override - public List getOwners(ShowInterface show) { - return getJdbcTemplate().query( - QUERY_FOR_OWNER + " AND owner.pk_show=?", OWNER_MAPPER, - show.getShowId()); - } - - - @Override - public RenderPartition getRenderPartition(LocalHostAssignment l) { - return getJdbcTemplate().queryForObject(QUERY_FOR_RENDER_PART + - "WHERE host_local.pk_host_local = ?", - RENDER_PARTION_MAPPER, l.getId()); - } - - - @Override - public RenderPartitionSeq getRenderPartitions(HostInterface host) { - List partitions = getJdbcTemplate().query(QUERY_FOR_RENDER_PART + - "WHERE host_local.pk_host = ?", - RENDER_PARTION_MAPPER, host.getHostId()); - return RenderPartitionSeq.newBuilder().addAllRenderPartitions(partitions).build(); - } - - - @Override - public Owner getOwner(String name) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " AND " + - "(" + - "owner.str_username = ? " + - "OR " + - "owner.pk_owner = ?" + - ")", OWNER_MAPPER, name, name); - } - - @Override - public Facility getFacility(String name) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_FACILITY + - " WHERE facility.pk_facility = ? OR facility.str_name = ?", - FACILITY_MAPPER, name, name); - } - - @Override - public FacilitySeq getFacilities() { - return FacilitySeq.newBuilder().addAllFacilities(getJdbcTemplate().query( - QUERY_FOR_FACILITY, FACILITY_MAPPER)).build(); - } - - @Override - public Limit findLimit(String name) { - String findLimitQuery = QUERY_FOR_LIMIT + - " WHERE limit_record.str_name = ? 
" + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); - } - - @Override - public Limit getLimit(String id) { - String getLimitQuery = QUERY_FOR_LIMIT + - " WHERE limit_record.pk_limit_record = ? "+ - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); - } - - @Override - public List getLimits() { - String getLimitsQuery = QUERY_FOR_LIMIT + - " GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().query(getLimitsQuery, LIMIT_MAPPER); - } - + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(WhiteboardDaoJdbc.class); + + private FrameSearchFactory frameSearchFactory; + private ProcSearchFactory procSearchFactory; + + @Override + public Service getService(String id) { + return getJdbcTemplate().queryForObject(GET_SERVICE + " WHERE (pk_service=? or str_name=?)", + SERVICE_MAPPER, id, id); + } + + @Override + public Service findService(String name) { + return getJdbcTemplate().queryForObject(GET_SERVICE + " WHERE service.str_name=?", + SERVICE_MAPPER, name); + } + + @Override + public ServiceSeq getDefaultServices() { + List services = getJdbcTemplate().query(GET_SERVICE, SERVICE_MAPPER); + return ServiceSeq.newBuilder().addAllServices(services).build(); + } + + @Override + public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { + return ServiceOverrideSeq.newBuilder() + .addAllServiceOverrides( + getJdbcTemplate().query(GET_SERVICE_OVERRIDE + " AND show_service.pk_show = ?", + SERVICE_OVERRIDE_MAPPER, show.getId())) + .build(); + } + + @Override + public ServiceOverride getServiceOverride(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject( + GET_SERVICE_OVERRIDE + " AND show_service.pk_show=? AND (show_service.str_name=? OR" + + " show_service.pk_show_service=?)", + SERVICE_OVERRIDE_MAPPER, show.getId(), name, name); + } + + @Override + public Filter getFilter(FilterInterface filter) { + return getJdbcTemplate().queryForObject(GET_FILTER + " AND pk_filter=?", FILTER_MAPPER, + filter.getFilterId()); + } + + @Override + public Filter findFilter(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject( + GET_FILTER + " AND filter.pk_show=? AND filter.str_name=?", FILTER_MAPPER, show.getShowId(), + name); + } + + @Override + public Filter findFilter(String show, String name) { + return getJdbcTemplate().queryForObject( + GET_FILTER + " AND show.str_name=? AND filter.str_name=?", FILTER_MAPPER, show, name); + } + + @Override + public FilterSeq getFilters(ShowInterface show) { + return FilterSeq.newBuilder() + .addAllFilters( + getJdbcTemplate().query(GET_FILTER + " AND show.pk_show=? ORDER BY f_order ASC", + FILTER_MAPPER, show.getShowId())) + .build(); + } + + @Override + public ActionSeq getActions(FilterInterface filter) { + return ActionSeq.newBuilder() + .addAllActions(getJdbcTemplate().query( + GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC ", + ACTION_MAPPER, filter.getFilterId())) + .build(); + } + + @Override + public MatcherSeq getMatchers(FilterInterface filter) { + return MatcherSeq.newBuilder() + .addAllMatchers( + getJdbcTemplate().query(GET_MATCHER + " AND filter.pk_filter=? 
ORDER BY ts_created ASC", + MATCHER_MAPPER, filter.getFilterId())) + .build(); + } + + @Override + public Action getAction(ActionInterface action) { + return getJdbcTemplate().queryForObject(GET_ACTION + " AND action.pk_action=?", ACTION_MAPPER, + action.getActionId()); + } + + @Override + public Matcher getMatcher(MatcherInterface matcher) { + return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", + MATCHER_MAPPER, matcher.getMatcherId()); + } + + @Override + public Show getShow(String id) { + return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.pk_show=?", SHOW_MAPPER, id); + } + + @Override + public ShowSeq getShows() { + List shows = getJdbcTemplate().query(GET_SHOW, SHOW_MAPPER); + return ShowSeq.newBuilder().addAllShows(shows).build(); + } + + @Override + public ShowSeq getActiveShows() { + List shows = getJdbcTemplate().query(GET_SHOW + " AND b_active=?", SHOW_MAPPER, true); + return ShowSeq.newBuilder().addAllShows(shows).build(); + } + + @Override + public Show findShow(String name) { + return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.str_name=?", SHOW_MAPPER, name); + } + + @Override + public Subscription getSubscription(String id) { + return getJdbcTemplate().queryForObject( + GET_SUBSCRIPTION + " AND subscription.pk_subscription=?", SUBSCRIPTION_MAPPER, id); + } + + @Override + public Subscription findSubscription(String show, String alloc) { + return getJdbcTemplate().queryForObject( + GET_SUBSCRIPTION + " AND show.str_name=? AND alloc.str_name=?", SUBSCRIPTION_MAPPER, show, + alloc); + } + + @Override + public SubscriptionSeq getSubscriptions(ShowInterface show) { + List subscriptions = getJdbcTemplate() + .query(GET_SUBSCRIPTION + " AND show.pk_show=?", SUBSCRIPTION_MAPPER, show.getShowId()); + return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); + } + + @Override + public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { + List subscriptions = + getJdbcTemplate().query(GET_SUBSCRIPTION + " AND subscription.pk_alloc=?", + SUBSCRIPTION_MAPPER, alloc.getAllocationId()); + return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); + } + + @Override + public Allocation findAllocation(String name) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", + ALLOCATION_MAPPER, name); + } + + @Override + public Allocation getAllocation(String id) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.pk_alloc=?", + ALLOCATION_MAPPER, id); + } + + @Override + public AllocationSeq getAllocations() { + return AllocationSeq.newBuilder().addAllAllocations( + getJdbcTemplate().query(GET_ALLOCATION + " ORDER BY alloc.str_name ", ALLOCATION_MAPPER)) + .build(); + } + + @Override + public AllocationSeq getAllocations(com.imageworks.spcue.FacilityInterface facility) { + return AllocationSeq.newBuilder() + .addAllAllocations(getJdbcTemplate().query(GET_ALLOCATION + " AND alloc.pk_facility = ?", + ALLOCATION_MAPPER, facility.getFacilityId())) + .build(); + } + + @Override + public JobSeq getJobs(GroupInterface group) { + List jobs = + getJdbcTemplate().query(GET_PENDING_JOBS + " AND job.pk_folder=? 
ORDER BY job.str_name ASC", + JOB_MAPPER, group.getId()); + return JobSeq.newBuilder().addAllJobs(jobs).build(); + } + + @Override + public List<String> getJobNames(JobSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_JOB_NAMES), new RowMapper<String>() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString(1); + } + }, r.getValuesArray()); + } + + @Override + public JobSeq getJobs(JobSearchInterface r) { + List<Job> jobs = getJdbcTemplate().query( + r.getFilteredQuery(GET_JOB) + "ORDER BY job.str_name ASC", JOB_MAPPER, r.getValuesArray()); + return JobSeq.newBuilder().addAllJobs(jobs).build(); + } + + @Override + public Job findJob(String name) { + return getJdbcTemplate().queryForObject(GET_PENDING_JOBS + " AND job.str_name=?", JOB_MAPPER, + name.toLowerCase()); + } + + @Override + public Job getJob(String id) { + return getJdbcTemplate().queryForObject(GET_JOB + " AND job.pk_job=?", JOB_MAPPER, id); + } + + @Override + public Layer getLayer(String id) { + return getJdbcTemplate().queryForObject(GET_LAYER_WITH_LIMITS + " WHERE layer.pk_layer=?", + LAYER_MAPPER, id); + } + + @Override + public Layer findLayer(String job, String layer) { + return getJdbcTemplate().queryForObject( + GET_LAYER_WITH_LIMITS + + " WHERE job.str_state='PENDING' AND job.str_name=? AND layer.str_name=?", + LAYER_MAPPER, job, layer); + } + + @Override + public LayerSeq getLayers(JobInterface job) { + String query = + GET_LAYER_WITH_LIMITS + " WHERE layer.pk_job=? ORDER BY layer.int_dispatch_order ASC"; + List<Layer> layers = getJdbcTemplate().query(query, LAYER_MAPPER, job.getJobId()); + return LayerSeq.newBuilder().addAllLayers(layers).build(); + } + + public Layer addLimitNames(Layer layer) { + return layer.toBuilder().addAllLimits(getLimitNames(layer.getId())).build(); + } + + public List<String> getLimitNames(String layerId) { + return getJdbcTemplate().query(GET_LIMIT_NAMES, LIMIT_NAME_MAPPER, layerId); + } + + @Override + public List<Limit> getLimits(LayerInterface layer) { + List<Limit> limits = + getJdbcTemplate().query(GET_LIMIT_FROM_LAYER_ID, LIMIT_MAPPER, layer.getLayerId()); + return limits; + } + + @Override + public GroupSeq getGroups(ShowInterface show) { + List<Group> groups = getJdbcTemplate().query( + GET_GROUPS + + " AND folder.pk_show=? ORDER BY folder_level.int_level ASC, folder.str_name ASC ", + GROUP_MAPPER, show.getShowId()); + return GroupSeq.newBuilder().addAllGroups(groups).build(); + } + + @Override + public GroupSeq getGroups(GroupInterface group) { + List<Group> groups = getJdbcTemplate().query(GET_GROUPS + + " AND folder.pk_parent_folder=? ORDER BY folder_level.int_level ASC, folder.f_order DESC, folder.str_name ASC ", + GROUP_MAPPER, group.getGroupId()); + return GroupSeq.newBuilder().addAllGroups(groups).build(); + } + + @Override + public Group getGroup(String id) { + return getJdbcTemplate().queryForObject(GET_GROUPS + " AND folder.pk_folder=?", GROUP_MAPPER, + id); + } + + @Override + public Group getRootGroup(ShowInterface show) { + return getJdbcTemplate().queryForObject( + GET_GROUPS + " AND show.pk_show=? 
AND folder.b_default=?", GROUP_MAPPER, show.getShowId(), + true); + } + + @Override + public Frame findFrame(String job, String layer, int frame) { + return getJdbcTemplate().queryForObject(FIND_FRAME, FRAME_MAPPER, job, layer, frame); + } + + @Override + public Frame getFrame(String id) { + return getJdbcTemplate().queryForObject(GET_FRAME + " AND frame.pk_frame=?", FRAME_MAPPER, id); + } + + @Override + public FrameSeq getFrames(FrameSearchInterface r) { + List frames = getJdbcTemplate().query(r.getSortedQuery(GET_FRAMES_CRITERIA), + FRAME_MAPPER, r.getValuesArray()); + return FrameSeq.newBuilder().addAllFrames(frames).build(); + } + + @Override + public Depend getDepend(DependInterface depend) { + return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, + depend.getId()); + } + + @Override + public Depend getDepend(com.imageworks.spcue.depend.AbstractDepend depend) { + return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, + depend.getId()); + } + + @Override + public DependSeq getWhatDependsOnThis(JobInterface job) { + List depends = + getJdbcTemplate().query(GET_DEPEND + " WHERE pk_parent IS NULL AND pk_job_depend_on=?", + DEPEND_MAPPER, job.getJobId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatDependsOnThis(LayerInterface layer) { + List depends = + getJdbcTemplate().query(GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_on=?", + DEPEND_MAPPER, layer.getLayerId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + + } + + @Override + public DependSeq getWhatDependsOnThis(FrameInterface frame) { + List depends = getJdbcTemplate().query(GET_DEPEND + " WHERE pk_frame_depend_on=?", + DEPEND_MAPPER, frame.getFrameId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatThisDependsOn(JobInterface job) { + List depends = + getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er IS NULL AND " + + "pk_frame_depend_er IS NULL AND pk_job_depend_er=?", + DEPEND_MAPPER, job.getJobId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatThisDependsOn(LayerInterface layer) { + List depends = + getJdbcTemplate().query(GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er=?", + DEPEND_MAPPER, layer.getLayerId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatThisDependsOn(FrameInterface frame) { /* - * Row Mappers + * This should show anything that is making the frame dependent. 
*/ - - public static final RowMapper LIMIT_MAPPER = - new RowMapper() { - public Limit mapRow(ResultSet rs, int rowNum) throws SQLException { - return Limit.newBuilder() - .setId(SqlUtil.getString(rs, "pk_limit_record")) - .setName(SqlUtil.getString(rs, "str_name")) - .setMaxValue(rs.getInt("int_max_value")) - .setCurrentRunning(rs.getInt("int_current_running")) - .build(); - } - }; - - public static final RowMapper MATCHER_MAPPER = - new RowMapper() { - public Matcher mapRow(ResultSet rs, int rowNum) throws SQLException { - return Matcher.newBuilder() - .setId(SqlUtil.getString(rs,"pk_matcher")) - .setInput(SqlUtil.getString(rs,"str_value")) - .setSubject(MatchSubject.valueOf(SqlUtil.getString(rs,"str_subject"))) - .setType(MatchType.valueOf(SqlUtil.getString(rs,"str_match"))) - .build(); - } - }; - - public static final RowMapper FILTER_MAPPER = - new RowMapper() { - public Filter mapRow(ResultSet rs, int rowNum) throws SQLException { - return Filter.newBuilder() - .setId(SqlUtil.getString(rs,"pk_filter")) - .setType(FilterType.valueOf(SqlUtil.getString(rs,"str_type"))) - .setOrder(rs.getFloat("f_order")) - .setName(SqlUtil.getString(rs,"str_name")) - .setEnabled(rs.getBoolean("b_enabled")) - .build(); - } - }; - - public static final RowMapper ACTION_MAPPER = - new RowMapper() { - public Action mapRow(ResultSet rs, int rowNum) throws SQLException { - Action.Builder builder = Action.newBuilder() - .setId(SqlUtil.getString(rs,"pk_action")) - .setBooleanValue(false) - .setIntegerValue(0) - .setFloatValue(0f) - .setStringValue("") - .setType(ActionType.valueOf(SqlUtil.getString(rs,"str_action"))) - .setValueType(ActionValueType.valueOf(SqlUtil.getString(rs,"str_value_type"))); - - switch (builder.getValueType()) { - case GROUP_TYPE: - builder.setGroupValue(SqlUtil.getString(rs,"pk_folder")); - break; - case STRING_TYPE: - builder.setStringValue(SqlUtil.getString(rs,"str_value")); - break; - case INTEGER_TYPE: - builder.setIntegerValue(rs.getInt("int_value")); - break; - case FLOAT_TYPE: - builder.setFloatValue(rs.getFloat("float_value")); - break; - case BOOLEAN_TYPE: - builder.setBooleanValue(rs.getBoolean("b_value")); - break; - } - return builder.build(); - } - }; - - public static final RowMapper FACILITY_MAPPER = - new RowMapper() { - public Facility mapRow(ResultSet rs, int rowNum) throws SQLException { - return Facility.newBuilder() - .setName(rs.getString("str_name")) - .setId(rs.getString("pk_facility")) - .build(); - } - }; - - - public static final RowMapper DEED_MAPPER = - new RowMapper() { - public Deed mapRow(ResultSet rs, int rowNum) throws SQLException { - return Deed.newBuilder() - .setId(SqlUtil.getString(rs,"pk_deed")) - .setHost(SqlUtil.getString(rs,"str_host")) - .setOwner(SqlUtil.getString(rs,"str_username")) - .build(); - } - }; - - public static final RowMapper - RENDER_PARTION_MAPPER = new RowMapper() { + List depends = getJdbcTemplate().query(GET_DEPEND + " WHERE " + + "(pk_job_depend_er=? AND str_type IN ('JOB_ON_JOB','JOB_ON_LAYER','JOB_ON_FRAME')) OR " + + "(pk_layer_depend_er=? AND str_type IN ('LAYER_ON_JOB','LAYER_ON_LAYER','LAYER_ON_FRAME')) " + + "OR (pk_frame_depend_er=?)", DEPEND_MAPPER, frame.getJobId(), frame.getLayerId(), + frame.getFrameId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getDepends(JobInterface job) { + List depends = getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_job_depend_er=? 
AND str_type != 'FRAME_ON_FRAME'", DEPEND_MAPPER, + job.getJobId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public Depend getDepend(String id) { + return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, id); + } + + @Override + public Group findGroup(String show, String group) { + return getJdbcTemplate().queryForObject( + GET_GROUPS + " AND show.str_name=? AND folder.str_name=?", GROUP_MAPPER, show, group); + } + + @Override + public Host findHost(String name) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.str_name=?", HOST_MAPPER, name); + } + + @Override + public HostSeq getHosts(HostSearchInterface r) { + List hosts = + getJdbcTemplate().query(r.getFilteredQuery(GET_HOST), HOST_MAPPER, r.getValuesArray()); + return HostSeq.newBuilder().addAllHosts(hosts).build(); + } + + @Override + public Host getHost(String id) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); + } + + @Override + public ProcSeq getProcs(HostInterface host) { + ProcSearchInterface r = procSearchFactory.create(); + r.filterByHost(host); + r.sortByHostName(); + r.sortByDispatchedTime(); + return ProcSeq.newBuilder().addAllProcs(getProcs(r).getProcsList()).build(); + } + + @Override + public ProcSeq getProcs(ProcSearchInterface p) { + p.sortByHostName(); + p.sortByDispatchedTime(); + List procs = + getJdbcTemplate().query(p.getFilteredQuery(GET_PROC), PROC_MAPPER, p.getValuesArray()); + return ProcSeq.newBuilder().addAllProcs(procs).build(); + } + + @Override + public CommentSeq getComments(HostInterface h) { + List comments = + getJdbcTemplate().query(GET_HOST_COMMENTS, COMMENT_MAPPER, h.getHostId()); + return CommentSeq.newBuilder().addAllComments(comments).build(); + } + + @Override + public CommentSeq getComments(JobInterface j) { + List comments = + getJdbcTemplate().query(GET_JOB_COMMENTS, COMMENT_MAPPER, j.getJobId()); + return CommentSeq.newBuilder().addAllComments(comments).build(); + } + + @Override + public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, + int epochSeconds) { + + long timeDiff = (System.currentTimeMillis() / 1000) - epochSeconds; + if (timeDiff > 60) { + throw new IllegalArgumentException("the last update timestamp cannot be over " + + "a minute off the current time, difference was: " + timeDiff); + } + + UpdatedFrameCheckResult.Builder resultBuilder = UpdatedFrameCheckResult.newBuilder(); + resultBuilder.setState(JobState.valueOf(getJdbcTemplate() + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId()))); + + FrameSearchInterface r = frameSearchFactory.create(job); + r.filterByLayers(layers); + r.filterByChangeDate(epochSeconds); + r.setMaxResults(100); + + List updatedFrameList = getJdbcTemplate() + .query(r.getFilteredQuery(GET_UPDATED_FRAME), UPDATED_FRAME_MAPPER, r.getValuesArray()); + resultBuilder.setUpdatedFrames( + UpdatedFrameSeq.newBuilder().addAllUpdatedFrames(updatedFrameList).build()); + resultBuilder.setServerTime((int) (System.currentTimeMillis() / 1000) - 1); + + return resultBuilder.build(); + } + + @Override + public Department getDepartment(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject(GET_DEPARTMENT, DEPARTMENT_MAPPER, show.getShowId(), + name); + } + + @Override + public DepartmentSeq getDepartments(ShowInterface show) { + List departments = + getJdbcTemplate().query(GET_DEPARTMENTS, DEPARTMENT_MAPPER, show.getShowId()); + return 
DepartmentSeq.newBuilder().addAllDepartments(departments).build(); + } + + @Override + public List getDepartmentNames() { + return getJdbcTemplate().query("SELECT str_name FROM dept ORDER BY str_name ASC", + new RowMapper() { + public String mapRow(ResultSet rs, int row) throws SQLException { + return rs.getString("str_name"); + } + }); + } + + @Override + public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { + return getJdbcTemplate().queryForObject( + GET_TASK + " AND point.pk_show=? AND point.pk_dept=? AND task.str_shot=?", TASK_MAPPER, + show.getShowId(), dept.getDepartmentId(), shot); + } + + @Override + public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { + if (dept == null) { + return TaskSeq.newBuilder() + .addAllTasks( + getJdbcTemplate().query(GET_TASK + " AND point.pk_show=? ORDER BY task.str_shot", + TASK_MAPPER, show.getShowId())) + .build(); + } else { + return TaskSeq.newBuilder() + .addAllTasks(getJdbcTemplate().query( + GET_TASK + " AND point.pk_show=? AND point.pk_dept=? ORDER BY task.str_shot", + TASK_MAPPER, show.getShowId(), dept.getDepartmentId())) + .build(); + } + } + + @Override + public DeedSeq getDeeds(OwnerEntity owner) { + List deeds = getJdbcTemplate().query(QUERY_FOR_DEED + " AND owner.pk_owner=?", + DEED_MAPPER, owner.getId()); + return DeedSeq.newBuilder().addAllDeeds(deeds).build(); + } + + @Override + public DeedSeq getDeeds(ShowInterface show) { + List deeds = + getJdbcTemplate().query(QUERY_FOR_DEED + " AND show.pk_show=?", DEED_MAPPER, show.getId()); + return DeedSeq.newBuilder().addAllDeeds(deeds).build(); + } + + @Override + public Host getHost(DeedEntity deed) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, deed.id); + } + + @Override + public Deed getDeed(HostInterface host) { + return getJdbcTemplate().queryForObject(QUERY_FOR_DEED + " AND host.pk_host=?", DEED_MAPPER, + host.getHostId()); + } + + @Override + public HostSeq getHosts(OwnerEntity owner) { + StringBuilder sb = new StringBuilder(4096); + String query = GET_HOST; + query = query.replace("FROM ", "FROM owner, deed,"); + sb.append(query); + sb.append("AND deed.pk_host = host.pk_host "); + sb.append("AND deed.pk_owner = owner.pk_owner "); + sb.append("AND owner.pk_owner = ?"); + + List hosts = getJdbcTemplate().query(sb.toString(), HOST_MAPPER, owner.getId()); + return HostSeq.newBuilder().addAllHosts(hosts).build(); + } + + @Override + public Owner getOwner(DeedEntity deed) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + + "pk_owner = (SELECT deed.pk_owner FROM deed " + "WHERE pk_deed=?)", OWNER_MAPPER, + deed.getId()); + } + + @Override + public Owner getOwner(HostInterface host) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + + "pk_owner = (SELECT deed.pk_owner FROM deed " + "WHERE pk_host=?)", OWNER_MAPPER, + host.getHostId()); + } + + @Override + public List getOwners(ShowInterface show) { + return getJdbcTemplate().query(QUERY_FOR_OWNER + " AND owner.pk_show=?", OWNER_MAPPER, + show.getShowId()); + } + + @Override + public RenderPartition getRenderPartition(LocalHostAssignment l) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_RENDER_PART + "WHERE host_local.pk_host_local = ?", RENDER_PARTION_MAPPER, + l.getId()); + } + + @Override + public RenderPartitionSeq getRenderPartitions(HostInterface host) { + List partitions = + getJdbcTemplate().query(QUERY_FOR_RENDER_PART + "WHERE host_local.pk_host = ?", + RENDER_PARTION_MAPPER, 
host.getHostId()); + return RenderPartitionSeq.newBuilder().addAllRenderPartitions(partitions).build(); + } + + @Override + public Owner getOwner(String name) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + "(" + + "owner.str_username = ? " + "OR " + "owner.pk_owner = ?" + ")", OWNER_MAPPER, name, name); + } + + @Override + public Facility getFacility(String name) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_FACILITY + " WHERE facility.pk_facility = ? OR facility.str_name = ?", + FACILITY_MAPPER, name, name); + } + + @Override + public FacilitySeq getFacilities() { + return FacilitySeq.newBuilder() + .addAllFacilities(getJdbcTemplate().query(QUERY_FOR_FACILITY, FACILITY_MAPPER)).build(); + } + + @Override + public Limit findLimit(String name) { + String findLimitQuery = QUERY_FOR_LIMIT + " WHERE limit_record.str_name = ? " + "GROUP BY " + + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); + } + + @Override + public Limit getLimit(String id) { + String getLimitQuery = QUERY_FOR_LIMIT + " WHERE limit_record.pk_limit_record = ? " + + "GROUP BY " + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); + } + + @Override + public List getLimits() { + String getLimitsQuery = QUERY_FOR_LIMIT + " GROUP BY " + "limit_record.str_name, " + + "limit_record.pk_limit_record, " + "limit_record.int_max_value"; + return getJdbcTemplate().query(getLimitsQuery, LIMIT_MAPPER); + } + + /* + * Row Mappers + */ + + public static final RowMapper LIMIT_MAPPER = new RowMapper() { + public Limit mapRow(ResultSet rs, int rowNum) throws SQLException { + return Limit.newBuilder().setId(SqlUtil.getString(rs, "pk_limit_record")) + .setName(SqlUtil.getString(rs, "str_name")).setMaxValue(rs.getInt("int_max_value")) + .setCurrentRunning(rs.getInt("int_current_running")).build(); + } + }; + + public static final RowMapper MATCHER_MAPPER = new RowMapper() { + public Matcher mapRow(ResultSet rs, int rowNum) throws SQLException { + return Matcher.newBuilder().setId(SqlUtil.getString(rs, "pk_matcher")) + .setInput(SqlUtil.getString(rs, "str_value")) + .setSubject(MatchSubject.valueOf(SqlUtil.getString(rs, "str_subject"))) + .setType(MatchType.valueOf(SqlUtil.getString(rs, "str_match"))).build(); + } + }; + + public static final RowMapper FILTER_MAPPER = new RowMapper() { + public Filter mapRow(ResultSet rs, int rowNum) throws SQLException { + return Filter.newBuilder().setId(SqlUtil.getString(rs, "pk_filter")) + .setType(FilterType.valueOf(SqlUtil.getString(rs, "str_type"))) + .setOrder(rs.getFloat("f_order")).setName(SqlUtil.getString(rs, "str_name")) + .setEnabled(rs.getBoolean("b_enabled")).build(); + } + }; + + public static final RowMapper ACTION_MAPPER = new RowMapper() { + public Action mapRow(ResultSet rs, int rowNum) throws SQLException { + Action.Builder builder = Action.newBuilder().setId(SqlUtil.getString(rs, "pk_action")) + .setBooleanValue(false).setIntegerValue(0).setFloatValue(0f).setStringValue("") + .setType(ActionType.valueOf(SqlUtil.getString(rs, "str_action"))) + .setValueType(ActionValueType.valueOf(SqlUtil.getString(rs, "str_value_type"))); + + switch (builder.getValueType()) { + case GROUP_TYPE: + builder.setGroupValue(SqlUtil.getString(rs, "pk_folder")); + break; + case STRING_TYPE: + builder.setStringValue(SqlUtil.getString(rs, 
"str_value")); + break; + case INTEGER_TYPE: + builder.setIntegerValue(rs.getInt("int_value")); + break; + case FLOAT_TYPE: + builder.setFloatValue(rs.getFloat("float_value")); + break; + case BOOLEAN_TYPE: + builder.setBooleanValue(rs.getBoolean("b_value")); + break; + } + return builder.build(); + } + }; + + public static final RowMapper FACILITY_MAPPER = new RowMapper() { + public Facility mapRow(ResultSet rs, int rowNum) throws SQLException { + return Facility.newBuilder().setName(rs.getString("str_name")) + .setId(rs.getString("pk_facility")).build(); + } + }; + + public static final RowMapper DEED_MAPPER = new RowMapper() { + public Deed mapRow(ResultSet rs, int rowNum) throws SQLException { + return Deed.newBuilder().setId(SqlUtil.getString(rs, "pk_deed")) + .setHost(SqlUtil.getString(rs, "str_host")) + .setOwner(SqlUtil.getString(rs, "str_username")).build(); + } + }; + + public static final RowMapper RENDER_PARTION_MAPPER = + new RowMapper() { public RenderPartition mapRow(ResultSet rs, int rowNum) throws SQLException { - RenderPartition.Builder builder = RenderPartition.newBuilder() - .setId(SqlUtil.getString(rs,"pk_host_local")) - .setCores(rs.getInt("int_cores_max") - rs.getInt("int_cores_idle")) - .setMaxCores(rs.getInt("int_cores_max")) - .setThreads(rs.getInt("int_threads")) - .setMaxMemory(rs.getLong("int_mem_max")) - .setMemory( rs.getLong("int_mem_max") - rs.getLong("int_mem_idle")) - .setGpus(rs.getInt("int_gpus_max") - rs.getInt("int_gpus_idle")) - .setMaxGpus(rs.getInt("int_gpus_max")) - .setGpuMemory(rs.getLong("int_gpu_mem_max") - rs.getLong("int_gpu_mem_idle")) - .setMaxGpuMemory(rs.getLong("int_gpu_mem_max")) - .setHost(SqlUtil.getString(rs,"str_host_name")) - .setJob(SqlUtil.getString(rs,"str_job_name")) - .setRenderPartType(RenderPartitionType.valueOf(SqlUtil.getString(rs,"str_type"))) - .setLayer("") - .setFrame(""); - - if (SqlUtil.getString(rs,"str_layer_name") != null) { - builder.setLayer(SqlUtil.getString(rs,"str_layer_name")); - } - - if (SqlUtil.getString(rs,"str_frame_name") != null) { - builder.setFrame(SqlUtil.getString(rs,"str_frame_name")); - } - - return builder.build(); + RenderPartition.Builder builder = + RenderPartition.newBuilder().setId(SqlUtil.getString(rs, "pk_host_local")) + .setCores(rs.getInt("int_cores_max") - rs.getInt("int_cores_idle")) + .setMaxCores(rs.getInt("int_cores_max")).setThreads(rs.getInt("int_threads")) + .setMaxMemory(rs.getLong("int_mem_max")) + .setMemory(rs.getLong("int_mem_max") - rs.getLong("int_mem_idle")) + .setGpus(rs.getInt("int_gpus_max") - rs.getInt("int_gpus_idle")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setGpuMemory(rs.getLong("int_gpu_mem_max") - rs.getLong("int_gpu_mem_idle")) + .setMaxGpuMemory(rs.getLong("int_gpu_mem_max")) + .setHost(SqlUtil.getString(rs, "str_host_name")) + .setJob(SqlUtil.getString(rs, "str_job_name")) + .setRenderPartType(RenderPartitionType.valueOf(SqlUtil.getString(rs, "str_type"))) + .setLayer("").setFrame(""); + + if (SqlUtil.getString(rs, "str_layer_name") != null) { + builder.setLayer(SqlUtil.getString(rs, "str_layer_name")); + } + + if (SqlUtil.getString(rs, "str_frame_name") != null) { + builder.setFrame(SqlUtil.getString(rs, "str_frame_name")); + } + + return builder.build(); } - }; - - public static final RowMapper - OWNER_MAPPER = new RowMapper() { - public Owner mapRow(ResultSet rs, int rowNum) throws SQLException { - return Owner.newBuilder() - .setName(SqlUtil.getString(rs,"str_username")) - .setId(SqlUtil.getString(rs,"pk_owner")) - 
.setShow(SqlUtil.getString(rs,"str_show")) - .setHostCount(rs.getInt("host_count")) - .build(); + }; + + public static final RowMapper OWNER_MAPPER = new RowMapper() { + public Owner mapRow(ResultSet rs, int rowNum) throws SQLException { + return Owner.newBuilder().setName(SqlUtil.getString(rs, "str_username")) + .setId(SqlUtil.getString(rs, "pk_owner")).setShow(SqlUtil.getString(rs, "str_show")) + .setHostCount(rs.getInt("host_count")).build(); + } + }; + + public static final RowMapper DEPARTMENT_MAPPER = new RowMapper() { + public Department mapRow(ResultSet rs, int row) throws SQLException { + return Department.newBuilder().setId(SqlUtil.getString(rs, "pk_point")) + .setName(SqlUtil.getString(rs, "str_name")).setDept(SqlUtil.getString(rs, "str_dept")) + .setTiManaged(rs.getBoolean("b_managed")).setTiTask(SqlUtil.getString(rs, "str_ti_task")) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))).build(); + } + }; + + public static final RowMapper PROC_MAPPER = new RowMapper() { + public Proc mapRow(ResultSet rs, int row) throws SQLException { + return Proc.newBuilder().setId(SqlUtil.getString(rs, "pk_proc")) + .setName(CueUtil.buildProcName(SqlUtil.getString(rs, "host_name"), + rs.getInt("int_cores_reserved"), rs.getInt("int_gpus_reserved"))) + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores_reserved"))) + .setReservedMemory(rs.getLong("int_mem_reserved")) + .setReservedGpus(rs.getInt("int_gpus_reserved")) + .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) + .setUsedMemory(rs.getLong("int_mem_used")) + .setUsedGpuMemory(rs.getLong("int_gpu_mem_used")) + .setFrameName(SqlUtil.getString(rs, "frame_name")) + .setJobName(SqlUtil.getString(rs, "job_name")) + .setGroupName(SqlUtil.getString(rs, "folder_name")) + .setShowName(SqlUtil.getString(rs, "show_name")) + .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) + .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) + .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) + .setUnbooked(rs.getBoolean("b_unbooked")) + .setLogPath(String.format("%s/%s.%s.rqlog", SqlUtil.getString(rs, "str_log_dir"), + SqlUtil.getString(rs, "job_name"), SqlUtil.getString(rs, "frame_name"))) + .setRedirectTarget(SqlUtil.getString(rs, "str_redirect")) + .setChildProcesses(SqlUtil.getByteString(rs, "bytea_children")) + .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))).build(); + } + }; + + public static final RowMapper TASK_MAPPER = new RowMapper() { + public Task mapRow(ResultSet rs, int row) throws SQLException { + return Task.newBuilder().setId(SqlUtil.getString(rs, "pk_task")) + .setDept(SqlUtil.getString(rs, "str_dept")).setShot(SqlUtil.getString(rs, "str_shot")) + .setMinCores(Convert.coreUnitsToWholeCores(rs.getInt("int_min_cores"))) + .setAdjustCores(Convert.coreUnitsToWholeCores(rs.getInt("int_adjust_cores"))).build(); + } + }; + + public static final RowMapper COMMENT_MAPPER = new RowMapper() { + + public Comment mapRow(ResultSet rs, int row) throws SQLException { + return Comment.newBuilder().setId(SqlUtil.getString(rs, "pk_comment")) + .setMessage(SqlUtil.getString(rs, "str_message")) + .setSubject(SqlUtil.getString(rs, "str_subject")) + .setTimestamp((int) (rs.getTimestamp("ts_created").getTime() / 1000)) + .setUser(SqlUtil.getString(rs, "str_user")).build(); + } + }; + + public static NestedHost.Builder mapNestedHostBuilder(ResultSet rs) throws SQLException { + NestedHost.Builder builder = 
NestedHost.newBuilder().setId(SqlUtil.getString(rs, "pk_host")) + .setName(SqlUtil.getString(rs, "host_name")) + .setAllocName(SqlUtil.getString(rs, "alloc_name")) + .setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)) + .setFreeMcp(rs.getLong("int_mcp_free")).setFreeMemory(rs.getLong("int_mem_free")) + .setFreeSwap(rs.getLong("int_swap_free")).setFreeGpuMemory(rs.getLong("int_gpu_mem_free")) + .setLoad(rs.getInt("int_load")).setNimbyEnabled(rs.getBoolean("b_nimby")) + .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))) + .setMemory(rs.getLong("int_mem")).setIdleMemory(rs.getLong("int_mem_idle")) + .setGpus(rs.getInt("int_gpus")).setIdleGpus(rs.getInt("int_gpus_idle")) + .setGpuMemory(rs.getLong("int_gpu_mem")).setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")) + .setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))) + .setTotalMcp(rs.getLong("int_mcp_total")).setTotalMemory(rs.getLong("int_mem_total")) + .setTotalSwap(rs.getLong("int_swap_total")) + .setTotalGpuMemory(rs.getLong("int_gpu_mem_total")) + .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) + .setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))) + .setHasComment(rs.getBoolean("b_comment")) + .setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]) + .setOs(SqlUtil.getString(rs, "str_os")); + + String tags = SqlUtil.getString(rs, "str_tags"); + if (tags != null) + builder.addAllTags(Arrays.asList(tags.split(" "))); + return builder; + } + + public static Host.Builder mapHostBuilder(ResultSet rs) throws SQLException { + Host.Builder builder = Host.newBuilder(); + builder.setId(SqlUtil.getString(rs, "pk_host")); + builder.setName(SqlUtil.getString(rs, "host_name")); + builder.setAllocName(SqlUtil.getString(rs, "alloc_name")); + builder.setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)); + builder.setFreeMcp(rs.getLong("int_mcp_free")); + builder.setFreeMemory(rs.getLong("int_mem_free")); + builder.setFreeSwap(rs.getLong("int_swap_free")); + builder.setFreeGpuMemory(rs.getLong("int_gpu_mem_free")); + builder.setLoad(rs.getInt("int_load")); + builder.setNimbyEnabled(rs.getBoolean("b_nimby")); + builder.setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))); + builder.setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))); + builder.setMemory(rs.getLong("int_mem")); + builder.setIdleMemory(rs.getLong("int_mem_idle")); + builder.setGpus(rs.getInt("int_gpus")); + builder.setIdleGpus(rs.getInt("int_gpus_idle")); + builder.setGpuMemory(rs.getLong("int_gpu_mem")); + builder.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); + builder.setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))); + builder.setTotalMcp(rs.getLong("int_mcp_total")); + builder.setTotalMemory(rs.getLong("int_mem_total")); + builder.setTotalSwap(rs.getLong("int_swap_total")); + builder.setTotalGpuMemory(rs.getLong("int_gpu_mem_total")); + builder.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)); + builder.setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))); + builder.setHasComment(rs.getBoolean("b_comment")); + builder.setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]); + builder.setOs(SqlUtil.getString(rs, "str_os")); + + String tags = SqlUtil.getString(rs, "str_tags"); + if (tags != null) + builder.addAllTags(Arrays.asList(tags.split(" "))); + return builder; + } + + public static final RowMapper HOST_MAPPER = new 
RowMapper() { + public Host mapRow(ResultSet rs, int row) throws SQLException { + Host.Builder builder = mapHostBuilder(rs); + return builder.build(); + } + }; + + public static final RowMapper DEPEND_MAPPER = new RowMapper() { + public Depend mapRow(ResultSet rs, int rowNum) throws SQLException { + return Depend.newBuilder().setId(SqlUtil.getString(rs, "pk_depend")) + .setActive(rs.getBoolean("b_active")).setAnyFrame(rs.getBoolean("b_any")) + .setDependErFrame(SqlUtil.getString(rs, "depend_er_frame")) + .setDependErLayer(SqlUtil.getString(rs, "depend_er_layer")) + .setDependErJob(SqlUtil.getString(rs, "depend_er_job")) + .setDependOnFrame(SqlUtil.getString(rs, "depend_on_frame")) + .setDependOnLayer(SqlUtil.getString(rs, "depend_on_layer")) + .setDependOnJob(SqlUtil.getString(rs, "depend_on_job")) + .setType(DependType.valueOf(SqlUtil.getString(rs, "str_type"))) + .setTarget(DependTarget.valueOf(SqlUtil.getString(rs, "str_target"))).build(); + } + }; + + public static final RowMapper ALLOCATION_MAPPER = new RowMapper() { + public Allocation mapRow(ResultSet rs, int rowNum) throws SQLException { + return Allocation.newBuilder().setId(rs.getString("pk_alloc")) + .setName(rs.getString("str_name")).setFacility(rs.getString("facility_name")) + .setTag(rs.getString("str_tag")).setBillable(rs.getBoolean("b_billable")) + .setStats(AllocationStats.newBuilder() + .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setAvailableCores(Convert.coreUnitsToCores(rs.getInt("int_available_cores"))) + .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_idle_cores"))) + .setRunningCores(Convert.coreUnitsToCores(rs.getInt("int_running_cores"))) + .setLockedCores(Convert.coreUnitsToCores(rs.getInt("int_locked_cores"))) + .setGpus(rs.getInt("int_gpus")).setAvailableGpus(rs.getInt("int_available_gpus")) + .setIdleGpus(rs.getInt("int_idle_gpus")).setRunningGpus(rs.getInt("int_running_gpus")) + .setLockedGpus(rs.getInt("int_locked_gpus")).setHosts(rs.getInt("int_hosts")) + .setDownHosts(rs.getInt("int_down_hosts")) + .setLockedHosts(rs.getInt("int_locked_hosts")).build()) + .build(); + } + }; + + private static final RowMapper GROUP_MAPPER = new RowMapper() { + + public Group mapRow(ResultSet rs, int rowNum) throws SQLException { + GroupStats stats = GroupStats.newBuilder().setDeadFrames(rs.getInt("int_dead_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setWaitingFrames(rs.getInt("int_waiting_count")) + .setDependFrames(rs.getInt("int_depend_count")).setPendingJobs(rs.getInt("int_job_count")) + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).build(); + return Group.newBuilder().setId(SqlUtil.getString(rs, "pk_folder")) + .setName(SqlUtil.getString(rs, "group_name")) + .setDepartment(SqlUtil.getString(rs, "str_dept")) + .setDefaultJobPriority(rs.getInt("int_job_priority")) + .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_job_min_cores"))) + .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_job_max_cores"))) + .setDefaultJobMinGpus(rs.getInt("int_job_min_gpus")) + .setDefaultJobMaxGpus(rs.getInt("int_job_max_gpus")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_gpus")).setMinGpus(rs.getInt("int_min_gpus")) + .setLevel(rs.getInt("int_level")).setParentId(SqlUtil.getString(rs, "pk_parent_folder")) + .setGroupStats(stats).build(); + } + }; + + public static final RowMapper 
JOB_MAPPER = new RowMapper() { + public Job mapRow(ResultSet rs, int rowNum) throws SQLException { + Job.Builder jobBuilder = Job.newBuilder().setId(SqlUtil.getString(rs, "pk_job")) + .setLogDir(SqlUtil.getString(rs, "str_log_dir")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_gpus")).setMinGpus(rs.getInt("int_min_gpus")) + .setName(SqlUtil.getString(rs, "str_name")).setPriority(rs.getInt("int_priority")) + .setShot(SqlUtil.getString(rs, "str_shot")).setShow(SqlUtil.getString(rs, "str_show")) + .setFacility(SqlUtil.getString(rs, "facility_name")) + .setGroup(SqlUtil.getString(rs, "group_name")) + .setState(JobState.valueOf(SqlUtil.getString(rs, "str_state"))) + .setUser(SqlUtil.getString(rs, "str_user")).setIsPaused(rs.getBoolean("b_paused")) + .setHasComment(rs.getBoolean("b_comment")).setAutoEat(rs.getBoolean("b_autoeat")) + .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) + .setOs(SqlUtil.getString(rs, "str_os")); + + int uid = rs.getInt("int_uid"); + if (!rs.wasNull()) { + jobBuilder.setUid(uid); + } + + Timestamp ts = rs.getTimestamp("ts_stopped"); + if (ts != null) { + jobBuilder.setStopTime((int) (ts.getTime() / 1000)); + } else { + jobBuilder.setStopTime(0); + } + + jobBuilder.setJobStats(mapJobStats(rs)); + return jobBuilder.build(); + } + }; + + public static JobStats mapJobStats(ResultSet rs) throws SQLException { + + JobStats.Builder statsBuilder = JobStats.newBuilder() + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).setMaxRss(rs.getLong("int_max_rss")) + .setTotalFrames(rs.getInt("int_frame_count")).setTotalLayers(rs.getInt("int_layer_count")) + .setWaitingFrames(rs.getInt("int_waiting_count")) + .setRunningFrames(rs.getInt("int_running_count")).setDeadFrames(rs.getInt("int_dead_count")) + .setSucceededFrames(rs.getInt("int_succeeded_count")) + .setEatenFrames(rs.getInt("int_eaten_count")).setDependFrames(rs.getInt("int_depend_count")) + .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) + .setFailedCoreSec(rs.getLong("int_core_time_fail")) + .setRenderedCoreSec(rs.getLong("int_core_time_success")) + .setTotalCoreSec(rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) + .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) + .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) + .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) + .setRenderedFrameCount(rs.getLong("int_frame_success_count")) + .setFailedFrameCount(rs.getLong("int_frame_fail_count")) + .setHighFrameSec(rs.getInt("int_clock_time_high")); + + if (statsBuilder.getRenderedFrameCount() > 0) { + statsBuilder.setAvgCoreSec( + (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); + statsBuilder.setAvgCoreSec( + (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); + statsBuilder.setRemainingCoreSec( + (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); + } else { + statsBuilder.setAvgFrameSec(0); + statsBuilder.setAvgCoreSec(0); + statsBuilder.setRemainingCoreSec(0); + } + return statsBuilder.build(); + } + + public static final RowMapper LAYER_MAPPER = new RowMapper() { + public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { + Layer.Builder builder = Layer.newBuilder().setId(SqlUtil.getString(rs, "pk_layer")) + 
.setParentId(SqlUtil.getString(rs, "pk_job")).setChunkSize(rs.getInt("int_chunk_size")) + .setDispatchOrder(rs.getInt("int_dispatch_order")) + .setName(SqlUtil.getString(rs, "str_name")).setCommand(SqlUtil.getString(rs, "str_cmd")) + .setRange(SqlUtil.getString(rs, "str_range")) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_cores_min"))) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_cores_max"))) + .setIsThreadable(rs.getBoolean("b_threadable")).setMinMemory(rs.getLong("int_mem_min")) + .setMinGpus(rs.getInt("int_gpus_min")).setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getLong("int_gpu_mem_min")) + .setType(LayerType.valueOf(SqlUtil.getString(rs, "str_type"))) + .addAllTags( + Sets.newHashSet(SqlUtil.getString(rs, "str_tags").replaceAll(" ", "").split("\\|"))) + .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))) + .addAllLimits(Arrays.asList(SqlUtil.getString(rs, "str_limit_names").split(","))) + .setMemoryOptimizerEnabled(rs.getBoolean("b_optimize")) + .setTimeout(rs.getInt("int_timeout")).setTimeoutLlu(rs.getInt("int_timeout_llu")); + + LayerStats.Builder statsBuilder = LayerStats.newBuilder() + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).setMaxRss(rs.getLong("int_max_rss")) + .setTotalFrames(rs.getInt("int_total_count")) + .setWaitingFrames(rs.getInt("int_waiting_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setDeadFrames(rs.getInt("int_dead_count")) + .setSucceededFrames(rs.getInt("int_succeeded_count")) + .setEatenFrames(rs.getInt("int_eaten_count")) + .setDependFrames(rs.getInt("int_depend_count")) + .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) + .setFailedCoreSec(rs.getLong("int_core_time_fail")) + .setRenderedCoreSec(rs.getLong("int_core_time_success")) + .setTotalCoreSec(rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) + .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) + .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) + .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) + .setRenderedFrameCount(rs.getLong("int_frame_success_count")) + .setFailedFrameCount(rs.getLong("int_frame_fail_count")) + .setHighFrameSec(rs.getInt("int_clock_time_high")) + .setLowFrameSec(rs.getInt("int_clock_time_low")); + + if (statsBuilder.getRenderedFrameCount() > 0) { + statsBuilder.setAvgFrameSec( + (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); + statsBuilder.setAvgCoreSec( + (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); + statsBuilder.setRemainingCoreSec( + (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); + } else { + statsBuilder.setAvgFrameSec(0); + statsBuilder.setAvgCoreSec(0); + statsBuilder.setRemainingCoreSec(0); + } + builder.setLayerStats(statsBuilder.build()); + return builder.build(); + } + }; + + private static final RowMapper LIMIT_NAME_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("str_name"); + } + }; + + public static final RowMapper SUBSCRIPTION_MAPPER = new RowMapper() { + public Subscription mapRow(ResultSet rs, int rowNum) throws SQLException { + return Subscription.newBuilder().setId(SqlUtil.getString(rs, "pk_subscription")) + .setBurst(rs.getInt("int_burst")).setName(rs.getString("name")) + 
.setReservedCores(rs.getInt("int_cores")).setReservedGpus(rs.getInt("int_gpus")) + .setSize(rs.getInt("int_size")).setAllocationName(rs.getString("alloc_name")) + .setShowName(rs.getString("show_name")).setFacility(rs.getString("facility_name")) + .build(); + } + }; + + public static final RowMapper UPDATED_FRAME_MAPPER = new RowMapper() { + public UpdatedFrame mapRow(ResultSet rs, int rowNum) throws SQLException { + UpdatedFrame.Builder builder = UpdatedFrame.newBuilder() + .setId(SqlUtil.getString(rs, "pk_frame")).setExitStatus(rs.getInt("int_exit_status")) + .setMaxRss(rs.getInt("int_mem_max_used")).setRetryCount(rs.getInt("int_retries")) + .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) + .setUsedMemory(rs.getInt("int_mem_used")); + + if (SqlUtil.getString(rs, "str_host") != null) { + builder.setLastResource( + String.format(Locale.ROOT, "%s/%2.2f/%d", SqlUtil.getString(rs, "str_host"), + Convert.coreUnitsToCores(rs.getInt("int_cores")), rs.getInt("int_gpus"))); + } else { + builder.setLastResource(""); + } + + java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); + if (ts_started != null) { + builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); + } else { + builder.setStartTime(0); + } + java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); + if (ts_stopped != null) { + builder.setStopTime((int) (ts_stopped.getTime() / 1000)); + } else { + builder.setStopTime(0); + } + + if (rs.getString("pk_frame_override") != null) { + String[] rgb = rs.getString("str_rgb").split(","); + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() + .setState(FrameState.valueOf(rs.getString("str_frame_state"))) + .setText(rs.getString("str_override_text")) + .setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(Integer.parseInt(rgb[0])) + .setGreen(Integer.parseInt(rgb[1])).setBlue(Integer.parseInt(rgb[2])).build()) + .build(); + builder.setFrameStateDisplayOverride(override); + } + + return builder.build(); + } + }; + + public static final RowMapper FRAME_MAPPER = new RowMapper() { + public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { + Frame.Builder builder = Frame.newBuilder().setId(SqlUtil.getString(rs, "pk_frame")) + .setName(SqlUtil.getString(rs, "str_name")).setExitStatus(rs.getInt("int_exit_status")) + .setMaxRss(rs.getLong("int_mem_max_used")).setNumber(rs.getInt("int_number")) + .setDispatchOrder(rs.getInt("int_dispatch_order")).setRetryCount(rs.getInt("int_retries")) + .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) + .setLayerName(SqlUtil.getString(rs, "layer_name")) + .setUsedMemory(rs.getLong("int_mem_used")) + .setReservedMemory(rs.getLong("int_mem_reserved")) + .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) + .setCheckpointState( + CheckpointState.valueOf(SqlUtil.getString(rs, "str_checkpoint_state"))) + .setCheckpointCount(rs.getInt("int_checkpoint_count")); + + if (SqlUtil.getString(rs, "str_host") != null) { + builder.setLastResource(CueUtil.buildProcName(SqlUtil.getString(rs, "str_host"), + rs.getInt("int_cores"), rs.getInt("int_gpus"))); + } else { + builder.setLastResource(""); + } + + java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); + if (ts_started != null) { + builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); + } else { + builder.setStartTime(0); + } + java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); + if (ts_stopped != null) { + builder.setStopTime((int) (ts_stopped.getTime() / 1000)); + } else 
{ + builder.setStopTime(0); + } + java.sql.Timestamp ts_llu = rs.getTimestamp("ts_llu"); + if (ts_llu != null) { + builder.setLluTime((int) (ts_llu.getTime() / 1000)); + } else { + builder.setLluTime(0); + } + + builder.setTotalCoreTime(rs.getInt("int_total_past_core_time")); + builder.setTotalGpuTime(rs.getInt("int_total_past_gpu_time")); + if (builder.getState() == FrameState.RUNNING) { + builder.setTotalCoreTime(builder.getTotalCoreTime() + + (int) (System.currentTimeMillis() / 1000 - builder.getStartTime()) + * rs.getInt("int_cores") / 100); + builder.setTotalGpuTime(builder.getTotalGpuTime() + + (int) (System.currentTimeMillis() / 1000 - builder.getStartTime()) + * rs.getInt("int_gpus")); + } + + if (rs.getString("pk_frame_override") != null) { + String[] rgb = rs.getString("str_rgb").split(","); + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() + .setState(FrameState.valueOf(rs.getString("str_frame_state"))) + .setText(rs.getString("str_override_text")) + .setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(Integer.parseInt(rgb[0])) + .setGreen(Integer.parseInt(rgb[1])).setBlue(Integer.parseInt(rgb[2])).build()) + .build(); + builder.setFrameStateDisplayOverride(override); + } + + return builder.build(); + } + }; + + private static final RowMapper SERVICE_MAPPER = new RowMapper() { + public Service mapRow(ResultSet rs, int rowNum) throws SQLException { + return Service.newBuilder().setId(SqlUtil.getString(rs, "pk_service")) + .setName(SqlUtil.getString(rs, "str_name")).setThreadable(rs.getBoolean("b_threadable")) + .setMinCores(rs.getInt("int_cores_min")).setMaxCores(rs.getInt("int_cores_max")) + .setMinMemory(rs.getInt("int_mem_min")).setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")).setMinGpuMemory(rs.getInt("int_gpu_mem_min")) + .addAllTags( + Lists.newArrayList(ServiceDaoJdbc.splitTags(SqlUtil.getString(rs, "str_tags")))) + .setTimeout(rs.getInt("int_timeout")).setTimeoutLlu(rs.getInt("int_timeout_llu")) + .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")).build(); + } + }; + + private static final RowMapper SERVICE_OVERRIDE_MAPPER = + new RowMapper() { + public ServiceOverride mapRow(ResultSet rs, int rowNum) throws SQLException { + Service data = Service.newBuilder().setId(SqlUtil.getString(rs, "pk_show_service")) + .setName(SqlUtil.getString(rs, "str_name")) + .setThreadable(rs.getBoolean("b_threadable")).setMinCores(rs.getInt("int_cores_min")) + .setMaxCores(rs.getInt("int_cores_max")).setMinMemory(rs.getInt("int_mem_min")) + .setMinGpus(rs.getInt("int_gpus_min")).setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) + .addAllTags( + Lists.newArrayList(ServiceDaoJdbc.splitTags(SqlUtil.getString(rs, "str_tags")))) + .setTimeout(rs.getInt("int_timeout")).setTimeoutLlu(rs.getInt("int_timeout_llu")) + .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")).build(); + return ServiceOverride.newBuilder().setId(SqlUtil.getString(rs, "pk_show_service")) + .setData(data).build(); } - }; - - public static final RowMapper DEPARTMENT_MAPPER = - new RowMapper() { - public Department mapRow(ResultSet rs, int row) throws SQLException { - return Department.newBuilder() - .setId(SqlUtil.getString(rs,"pk_point")) - .setName(SqlUtil.getString(rs,"str_name")) - .setDept(SqlUtil.getString(rs,"str_dept")) - .setTiManaged(rs.getBoolean("b_managed")) - .setTiTask(SqlUtil.getString(rs,"str_ti_task")) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .build(); - } - }; - - 
public static final RowMapper PROC_MAPPER = - new RowMapper() { - public Proc mapRow(ResultSet rs, int row) throws SQLException { - return Proc.newBuilder() - .setId(SqlUtil.getString(rs,"pk_proc")) - .setName(CueUtil.buildProcName(SqlUtil.getString(rs,"host_name"), - rs.getInt("int_cores_reserved"), rs.getInt("int_gpus_reserved"))) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores_reserved"))) - .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpus(rs.getInt("int_gpus_reserved")) - .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) - .setUsedMemory(rs.getLong("int_mem_used")) - .setUsedGpuMemory(rs.getLong("int_gpu_mem_used")) - .setFrameName(SqlUtil.getString(rs, "frame_name")) - .setJobName(SqlUtil.getString(rs,"job_name")) - .setGroupName(SqlUtil.getString(rs,"folder_name")) - .setShowName(SqlUtil.getString(rs,"show_name")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) - .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) - .setUnbooked(rs.getBoolean("b_unbooked")) - .setLogPath(String.format("%s/%s.%s.rqlog", - SqlUtil.getString(rs,"str_log_dir"), SqlUtil.getString(rs,"job_name"), - SqlUtil.getString(rs,"frame_name"))) - .setRedirectTarget(SqlUtil.getString(rs, "str_redirect")) - .setChildProcesses(SqlUtil.getByteString(rs, "bytea_children")) - .addAllServices(Arrays.asList(SqlUtil.getString(rs,"str_services").split(","))) - .build(); - } - }; - - public static final RowMapper TASK_MAPPER = - new RowMapper() { - public Task mapRow(ResultSet rs, int row) throws SQLException { - return Task.newBuilder() - .setId(SqlUtil.getString(rs,"pk_task")) - .setDept(SqlUtil.getString(rs,"str_dept")) - .setShot(SqlUtil.getString(rs,"str_shot")) - .setMinCores(Convert.coreUnitsToWholeCores(rs.getInt("int_min_cores"))) - .setAdjustCores(Convert.coreUnitsToWholeCores(rs.getInt("int_adjust_cores"))) - .build(); - } - }; - - public static final RowMapper COMMENT_MAPPER = - new RowMapper() { - - public Comment mapRow(ResultSet rs, int row) throws SQLException { - return Comment.newBuilder() - .setId(SqlUtil.getString(rs,"pk_comment")) - .setMessage(SqlUtil.getString(rs,"str_message")) - .setSubject(SqlUtil.getString(rs,"str_subject")) - .setTimestamp((int)(rs.getTimestamp("ts_created").getTime() / 1000)) - .setUser(SqlUtil.getString(rs,"str_user")) - .build(); - } - }; - - - public static NestedHost.Builder mapNestedHostBuilder(ResultSet rs) throws SQLException { - NestedHost.Builder builder = NestedHost.newBuilder() - .setId(SqlUtil.getString(rs,"pk_host")) - .setName(SqlUtil.getString(rs,"host_name")) - .setAllocName(SqlUtil.getString(rs,"alloc_name")) - .setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)) - .setFreeMcp(rs.getLong("int_mcp_free")) - .setFreeMemory(rs.getLong("int_mem_free")) - .setFreeSwap(rs.getLong("int_swap_free")) - .setFreeGpuMemory(rs.getLong("int_gpu_mem_free")) - .setLoad(rs.getInt("int_load")) - .setNimbyEnabled(rs.getBoolean("b_nimby")) - .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))) - .setMemory(rs.getLong("int_mem")) - .setIdleMemory(rs.getLong("int_mem_idle")) - .setGpus(rs.getInt("int_gpus")) - .setIdleGpus(rs.getInt("int_gpus_idle")) - .setGpuMemory(rs.getLong("int_gpu_mem")) - .setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")) - .setState(HardwareState.valueOf(SqlUtil.getString(rs,"host_state"))) - 
.setTotalMcp(rs.getLong("int_mcp_total")) - .setTotalMemory(rs.getLong("int_mem_total")) - .setTotalSwap(rs.getLong("int_swap_total")) - .setTotalGpuMemory(rs.getLong("int_gpu_mem_total")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setLockState(LockState.valueOf(SqlUtil.getString(rs,"str_lock_state"))) - .setHasComment(rs.getBoolean("b_comment")) - .setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]) - .setOs(SqlUtil.getString(rs,"str_os")); - - String tags = SqlUtil.getString(rs,"str_tags"); - if (tags != null) - builder.addAllTags(Arrays.asList(tags.split(" "))); - return builder; - } - - public static Host.Builder mapHostBuilder(ResultSet rs) throws SQLException { - Host.Builder builder = Host.newBuilder(); - builder.setId(SqlUtil.getString(rs,"pk_host")); - builder.setName(SqlUtil.getString(rs,"host_name")); - builder.setAllocName(SqlUtil.getString(rs,"alloc_name")); - builder.setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)); - builder.setFreeMcp(rs.getLong("int_mcp_free")); - builder.setFreeMemory(rs.getLong("int_mem_free")); - builder.setFreeSwap(rs.getLong("int_swap_free")); - builder.setFreeGpuMemory(rs.getLong("int_gpu_mem_free")); - builder.setLoad(rs.getInt("int_load")); - builder.setNimbyEnabled(rs.getBoolean("b_nimby")); - builder.setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))); - builder.setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))); - builder.setMemory(rs.getLong("int_mem")); - builder.setIdleMemory(rs.getLong("int_mem_idle")); - builder.setGpus(rs.getInt("int_gpus")); - builder.setIdleGpus(rs.getInt("int_gpus_idle")); - builder.setGpuMemory(rs.getLong("int_gpu_mem")); - builder.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); - builder.setState(HardwareState.valueOf(SqlUtil.getString(rs,"host_state"))); - builder.setTotalMcp(rs.getLong("int_mcp_total")); - builder.setTotalMemory(rs.getLong("int_mem_total")); - builder.setTotalSwap(rs.getLong("int_swap_total")); - builder.setTotalGpuMemory(rs.getLong("int_gpu_mem_total")); - builder.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)); - builder.setLockState(LockState.valueOf(SqlUtil.getString(rs,"str_lock_state"))); - builder.setHasComment(rs.getBoolean("b_comment")); - builder.setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]); - builder.setOs(SqlUtil.getString(rs,"str_os")); - - String tags = SqlUtil.getString(rs,"str_tags"); - if (tags != null) - builder.addAllTags(Arrays.asList(tags.split(" "))); - return builder; - } - - public static final RowMapper HOST_MAPPER = - new RowMapper() { - public Host mapRow(ResultSet rs, int row) throws SQLException { - Host.Builder builder = mapHostBuilder(rs); - return builder.build(); - } - }; - - public static final RowMapper DEPEND_MAPPER = - new RowMapper() { - public Depend mapRow(ResultSet rs, int rowNum) throws SQLException { - return Depend.newBuilder() - .setId(SqlUtil.getString(rs,"pk_depend")) - .setActive(rs.getBoolean("b_active")) - .setAnyFrame(rs.getBoolean("b_any")) - .setDependErFrame(SqlUtil.getString(rs,"depend_er_frame")) - .setDependErLayer(SqlUtil.getString(rs,"depend_er_layer")) - .setDependErJob(SqlUtil.getString(rs,"depend_er_job")) - .setDependOnFrame(SqlUtil.getString(rs,"depend_on_frame")) - .setDependOnLayer(SqlUtil.getString(rs,"depend_on_layer")) - .setDependOnJob(SqlUtil.getString(rs, "depend_on_job")) - .setType(DependType.valueOf(SqlUtil.getString(rs,"str_type"))) - .setTarget(DependTarget.valueOf(SqlUtil.getString(rs,"str_target"))) - 
.build(); - } - }; - - public static final RowMapper ALLOCATION_MAPPER = - new RowMapper() { - public Allocation mapRow(ResultSet rs, int rowNum) throws SQLException { - return Allocation.newBuilder() - .setId(rs.getString("pk_alloc")) - .setName(rs.getString("str_name")) - .setFacility(rs.getString("facility_name")) - .setTag(rs.getString("str_tag")) - .setBillable(rs.getBoolean("b_billable")) - .setStats(AllocationStats.newBuilder() - .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setAvailableCores(Convert.coreUnitsToCores(rs.getInt("int_available_cores"))) - .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_idle_cores"))) - .setRunningCores(Convert.coreUnitsToCores(rs.getInt("int_running_cores"))) - .setLockedCores(Convert.coreUnitsToCores(rs.getInt("int_locked_cores"))) - .setGpus(rs.getInt("int_gpus")) - .setAvailableGpus(rs.getInt("int_available_gpus")) - .setIdleGpus(rs.getInt("int_idle_gpus")) - .setRunningGpus(rs.getInt("int_running_gpus")) - .setLockedGpus(rs.getInt("int_locked_gpus")) - .setHosts(rs.getInt("int_hosts")) - .setDownHosts(rs.getInt("int_down_hosts")) - .setLockedHosts(rs.getInt("int_locked_hosts")) - .build()) - .build(); - } - }; - - private static final RowMapper GROUP_MAPPER = - new RowMapper() { - - public Group mapRow(ResultSet rs, int rowNum) throws SQLException { - GroupStats stats = GroupStats.newBuilder() - .setDeadFrames(rs.getInt("int_dead_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingJobs(rs.getInt("int_job_count")) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")) - .build(); - return Group.newBuilder() - .setId(SqlUtil.getString(rs,"pk_folder")) - .setName(SqlUtil.getString(rs,"group_name")) - .setDepartment(SqlUtil.getString(rs,"str_dept")) - .setDefaultJobPriority(rs.getInt("int_job_priority")) - .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_job_min_cores"))) - .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_job_max_cores"))) - .setDefaultJobMinGpus(rs.getInt("int_job_min_gpus")) - .setDefaultJobMaxGpus(rs.getInt("int_job_max_gpus")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setMaxGpus(rs.getInt("int_max_gpus")) - .setMinGpus(rs.getInt("int_min_gpus")) - .setLevel(rs.getInt("int_level")) - .setParentId(SqlUtil.getString(rs, "pk_parent_folder")) - .setGroupStats(stats) - .build(); - } - }; - - public static final RowMapper JOB_MAPPER = - new RowMapper() { - public Job mapRow(ResultSet rs, int rowNum) throws SQLException { - Job.Builder jobBuilder = Job.newBuilder() - .setId(SqlUtil.getString(rs, "pk_job")) - .setLogDir(SqlUtil.getString(rs, "str_log_dir")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setMaxGpus(rs.getInt("int_max_gpus")) - .setMinGpus(rs.getInt("int_min_gpus")) - .setName(SqlUtil.getString(rs,"str_name")) - .setPriority(rs.getInt("int_priority")) - .setShot(SqlUtil.getString(rs,"str_shot")) - .setShow(SqlUtil.getString(rs,"str_show")) - .setFacility(SqlUtil.getString(rs,"facility_name")) - .setGroup(SqlUtil.getString(rs,"group_name")) - .setState(JobState.valueOf(SqlUtil.getString(rs,"str_state"))) - .setUser(SqlUtil.getString(rs,"str_user")) - .setIsPaused(rs.getBoolean("b_paused")) - 
.setHasComment(rs.getBoolean("b_comment")) - .setAutoEat(rs.getBoolean("b_autoeat")) - .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) - .setOs(SqlUtil.getString(rs,"str_os")); - - int uid = rs.getInt("int_uid"); - if (!rs.wasNull()) { - jobBuilder.setUid(uid); - } - - Timestamp ts = rs.getTimestamp("ts_stopped"); - if (ts != null) { - jobBuilder.setStopTime((int) (ts.getTime() / 1000)); - } - else { - jobBuilder.setStopTime(0); - } - - jobBuilder.setJobStats(mapJobStats(rs)); - return jobBuilder.build(); - } - }; - - public static JobStats mapJobStats(ResultSet rs) throws SQLException { - - JobStats.Builder statsBuilder = JobStats.newBuilder() - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")) - .setMaxRss(rs.getLong("int_max_rss")) - .setTotalFrames(rs.getInt("int_frame_count")) - .setTotalLayers(rs.getInt("int_layer_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setSucceededFrames(rs.getInt("int_succeeded_count")) - .setEatenFrames(rs.getInt("int_eaten_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) - .setFailedCoreSec(rs.getLong("int_core_time_fail")) - .setRenderedCoreSec(rs.getLong("int_core_time_success")) - .setTotalCoreSec( rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) - .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) - .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) - .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) - .setRenderedFrameCount( rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setHighFrameSec(rs.getInt("int_clock_time_high")); - - if (statsBuilder.getRenderedFrameCount() > 0) { - statsBuilder.setAvgCoreSec( - (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); - statsBuilder.setAvgCoreSec( - (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); - statsBuilder.setRemainingCoreSec( - (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); - } - else { - statsBuilder.setAvgFrameSec(0); - statsBuilder.setAvgCoreSec(0); - statsBuilder.setRemainingCoreSec(0); - } - return statsBuilder.build(); - } - - public static final RowMapper LAYER_MAPPER = - new RowMapper() { - public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { - Layer.Builder builder = Layer.newBuilder() - .setId(SqlUtil.getString(rs,"pk_layer")) - .setParentId(SqlUtil.getString(rs,"pk_job")) - .setChunkSize(rs.getInt("int_chunk_size")) - .setDispatchOrder(rs.getInt("int_dispatch_order")) - .setName(SqlUtil.getString(rs,"str_name")) - .setCommand(SqlUtil.getString(rs,"str_cmd")) - .setRange(SqlUtil.getString(rs,"str_range")) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_cores_min"))) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_cores_max"))) - .setIsThreadable(rs.getBoolean("b_threadable")) - .setMinMemory(rs.getLong("int_mem_min")) - .setMinGpus(rs.getInt("int_gpus_min")) - .setMaxGpus(rs.getInt("int_gpus_max")) - .setMinGpuMemory(rs.getLong("int_gpu_mem_min")) - .setType(LayerType.valueOf(SqlUtil.getString(rs,"str_type"))) - .addAllTags(Sets.newHashSet( - SqlUtil.getString(rs,"str_tags"). 
- replaceAll(" ","").split("\\|"))) - .addAllServices(Arrays.asList(SqlUtil.getString(rs,"str_services").split(","))) - .addAllLimits(Arrays.asList(SqlUtil.getString(rs,"str_limit_names").split(","))) - .setMemoryOptimizerEnabled(rs.getBoolean("b_optimize")) - .setTimeout(rs.getInt("int_timeout")) - .setTimeoutLlu(rs.getInt("int_timeout_llu")); - - LayerStats.Builder statsBuilder = LayerStats.newBuilder() - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")) - .setMaxRss(rs.getLong("int_max_rss")) - .setTotalFrames(rs.getInt("int_total_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setSucceededFrames(rs.getInt("int_succeeded_count")) - .setEatenFrames(rs.getInt("int_eaten_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingFrames( - rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) - .setFailedCoreSec(rs.getLong("int_core_time_fail")) - .setRenderedCoreSec(rs.getLong("int_core_time_success")) - .setTotalCoreSec( - rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) - .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) - .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) - .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) - .setRenderedFrameCount( rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setHighFrameSec(rs.getInt("int_clock_time_high")) - .setLowFrameSec(rs.getInt("int_clock_time_low")); - - if (statsBuilder.getRenderedFrameCount() > 0) { - statsBuilder.setAvgFrameSec( - (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); - statsBuilder.setAvgCoreSec( - (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); - statsBuilder.setRemainingCoreSec( - (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); - } - else { - statsBuilder.setAvgFrameSec(0); - statsBuilder.setAvgCoreSec(0); - statsBuilder.setRemainingCoreSec(0); - } - builder.setLayerStats(statsBuilder.build()); - return builder.build(); - } - }; - - private static final RowMapper LIMIT_NAME_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_name"); - } - }; - - public static final RowMapper SUBSCRIPTION_MAPPER = - new RowMapper() { - public Subscription mapRow(ResultSet rs, int rowNum) throws SQLException { - return Subscription.newBuilder() - .setId(SqlUtil.getString(rs, "pk_subscription")) - .setBurst(rs.getInt("int_burst")) - .setName(rs.getString("name")) - .setReservedCores(rs.getInt("int_cores")) - .setReservedGpus(rs.getInt("int_gpus")) - .setSize(rs.getInt("int_size")) - .setAllocationName(rs.getString("alloc_name")) - .setShowName(rs.getString("show_name")) - .setFacility(rs.getString("facility_name")) - .build(); - } - }; - - public static final RowMapper UPDATED_FRAME_MAPPER = - new RowMapper() { - public UpdatedFrame mapRow(ResultSet rs, int rowNum) throws SQLException { - UpdatedFrame.Builder builder = UpdatedFrame.newBuilder() - .setId(SqlUtil.getString(rs, "pk_frame")) - .setExitStatus(rs.getInt("int_exit_status")) - .setMaxRss(rs.getInt("int_mem_max_used")) - .setRetryCount(rs.getInt("int_retries")) - .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setUsedMemory(rs.getInt("int_mem_used")); - - if (SqlUtil.getString(rs, 
"str_host") != null) { - builder.setLastResource(String.format(Locale.ROOT, "%s/%2.2f/%d", - SqlUtil.getString(rs, "str_host"), - Convert.coreUnitsToCores(rs.getInt("int_cores")), - rs.getInt("int_gpus"))); - } else { - builder.setLastResource(""); - } - - java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); - if (ts_started != null) { - builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); - } else { - builder.setStartTime(0); - } - java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); - if (ts_stopped != null) { - builder.setStopTime((int) (ts_stopped.getTime() / 1000)); - } else { - builder.setStopTime(0); - } - - if (rs.getString("pk_frame_override") != null){ - String[] rgb = rs.getString("str_rgb").split(","); - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(FrameState.valueOf(rs.getString("str_frame_state"))) - .setText(rs.getString("str_override_text")) - .setColor(FrameStateDisplayOverride.RGB.newBuilder() - .setRed(Integer.parseInt(rgb[0])) - .setGreen(Integer.parseInt(rgb[1])) - .setBlue(Integer.parseInt(rgb[2])) - .build() - ) - .build(); - builder.setFrameStateDisplayOverride(override); - } - - return builder.build(); - } - }; - - public static final RowMapper FRAME_MAPPER = - new RowMapper() { - public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { - Frame.Builder builder = Frame.newBuilder() - .setId(SqlUtil.getString(rs,"pk_frame")) - .setName(SqlUtil.getString(rs,"str_name")) - .setExitStatus(rs.getInt("int_exit_status")) - .setMaxRss(rs.getLong("int_mem_max_used")) - .setNumber(rs.getInt("int_number")) - .setDispatchOrder(rs.getInt("int_dispatch_order")) - .setRetryCount(rs.getInt("int_retries")) - .setState(FrameState.valueOf(SqlUtil.getString(rs,"str_state"))) - .setLayerName(SqlUtil.getString(rs,"layer_name")) - .setUsedMemory(rs.getLong("int_mem_used")) - .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) - .setCheckpointState(CheckpointState.valueOf( - SqlUtil.getString(rs,"str_checkpoint_state"))) - .setCheckpointCount(rs.getInt("int_checkpoint_count")); - - if (SqlUtil.getString(rs,"str_host") != null) { - builder.setLastResource(CueUtil.buildProcName(SqlUtil.getString(rs,"str_host"), - rs.getInt("int_cores"), rs.getInt("int_gpus"))); - } else { - builder.setLastResource(""); - } - - java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); - if (ts_started != null) { - builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); - } - else { - builder.setStartTime(0); - } - java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); - if (ts_stopped!= null) { - builder.setStopTime((int) (ts_stopped.getTime() / 1000)); - } - else { - builder.setStopTime(0); - } - java.sql.Timestamp ts_llu = rs.getTimestamp("ts_llu"); - if (ts_llu!= null) { - builder.setLluTime((int) (ts_llu.getTime() / 1000)); - } - else { - builder.setLluTime(0); - } - - builder.setTotalCoreTime(rs.getInt("int_total_past_core_time")); - builder.setTotalGpuTime(rs.getInt("int_total_past_gpu_time")); - if (builder.getState() == FrameState.RUNNING) { - builder.setTotalCoreTime(builder.getTotalCoreTime() + - (int)(System.currentTimeMillis() / 1000 - builder.getStartTime()) * rs.getInt("int_cores") / 100); - builder.setTotalGpuTime(builder.getTotalGpuTime() + - (int)(System.currentTimeMillis() / 1000 - builder.getStartTime()) * rs.getInt("int_gpus")); - } - - if (rs.getString("pk_frame_override") != null){ - String[] rgb = 
rs.getString("str_rgb").split(","); - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(FrameState.valueOf(rs.getString("str_frame_state"))) - .setText(rs.getString("str_override_text")) - .setColor(FrameStateDisplayOverride.RGB.newBuilder() - .setRed(Integer.parseInt(rgb[0])) - .setGreen(Integer.parseInt(rgb[1])) - .setBlue(Integer.parseInt(rgb[2])) - .build() - ) - .build(); - builder.setFrameStateDisplayOverride(override); - } - - return builder.build(); - } - }; - - private static final RowMapper SERVICE_MAPPER = - new RowMapper() { - public Service mapRow(ResultSet rs, int rowNum) throws SQLException { - return Service.newBuilder() - .setId(SqlUtil.getString(rs,"pk_service")) - .setName(SqlUtil.getString(rs,"str_name")) - .setThreadable(rs.getBoolean("b_threadable")) - .setMinCores(rs.getInt("int_cores_min")) - .setMaxCores(rs.getInt("int_cores_max")) - .setMinMemory(rs.getInt("int_mem_min")) - .setMinGpus(rs.getInt("int_gpus_min")) - .setMaxGpus(rs.getInt("int_gpus_max")) - .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) - .addAllTags(Lists.newArrayList(ServiceDaoJdbc.splitTags( - SqlUtil.getString(rs,"str_tags")))) - .setTimeout(rs.getInt("int_timeout")) - .setTimeoutLlu(rs.getInt("int_timeout_llu")) - .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")) - .build(); - } - }; - - private static final RowMapper SERVICE_OVERRIDE_MAPPER = - new RowMapper() { - public ServiceOverride mapRow(ResultSet rs, int rowNum) throws SQLException { - Service data = Service.newBuilder() - .setId(SqlUtil.getString(rs,"pk_show_service")) - .setName(SqlUtil.getString(rs,"str_name")) - .setThreadable(rs.getBoolean("b_threadable")) - .setMinCores(rs.getInt("int_cores_min")) - .setMaxCores(rs.getInt("int_cores_max")) - .setMinMemory(rs.getInt("int_mem_min")) - .setMinGpus(rs.getInt("int_gpus_min")) - .setMaxGpus(rs.getInt("int_gpus_max")) - .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) - .addAllTags(Lists.newArrayList(ServiceDaoJdbc.splitTags( - SqlUtil.getString(rs,"str_tags")))) - .setTimeout(rs.getInt("int_timeout")) - .setTimeoutLlu(rs.getInt("int_timeout_llu")) - .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")) - .build(); - return ServiceOverride.newBuilder() - .setId(SqlUtil.getString(rs,"pk_show_service")) - .setData(data) - .build(); - } - }; - - public static final RowMapper SHOW_MAPPER = - new RowMapper() { - public Show mapRow(ResultSet rs, int rowNum) throws SQLException { - ShowStats stats = ShowStats.newBuilder() - .setPendingFrames(rs.getInt("int_pending_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setCreatedFrameCount(rs.getLong("int_frame_insert_count")) - .setCreatedJobCount(rs.getLong("int_job_insert_count")) - .setRenderedFrameCount(rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")) - .setPendingJobs(rs.getInt("int_job_count")) - .build(); - return Show.newBuilder() - .setId(SqlUtil.getString(rs,"pk_show")) - .setName(SqlUtil.getString(rs,"str_name")) - .setActive(rs.getBoolean("b_active")) - .setDefaultMaxCores(Convert.coreUnitsToCores(rs.getInt("int_default_max_cores"))) - .setDefaultMinCores(Convert.coreUnitsToCores(rs.getInt("int_default_min_cores"))) - .setDefaultMaxGpus(rs.getInt("int_default_max_gpus")) - .setDefaultMinGpus(rs.getInt("int_default_min_gpus")) - 
.setBookingEnabled(rs.getBoolean("b_booking_enabled")) - .setDispatchEnabled(rs.getBoolean("b_dispatch_enabled")) - .setCommentEmail(SqlUtil.getString(rs,"str_comment_email")) - .setShowStats(stats) - .build(); - } - }; - /* - * Queries - */ - - private static final String GET_JOB_NAMES = - "SELECT " + - "job.str_name "+ - "FROM " + - "job," + - "show " + - "WHERE " + - "job.pk_show = show.pk_show " + - "AND " + - "job.str_state = 'PENDING' "; - - private static final String GET_HOST_COMMENTS = - "SELECT " + - "* " + - "FROM " + - "comments " + - "WHERE " + - "pk_host=? " + - "ORDER BY " + - "ts_created ASC"; - - private static final String GET_FILTER = - "SELECT " + - "filter.* " + - "FROM " + - "filter," + - "show " + - "WHERE " + - "filter.pk_show = show.pk_show"; - - private static final String GET_FRAME = - "SELECT " + - "frame.pk_frame, " + - "frame.int_exit_status,"+ - "frame.str_name,"+ - "frame.int_number,"+ - "frame.int_dispatch_order,"+ - "frame.ts_started,"+ - "frame.ts_stopped,"+ - "frame.ts_llu,"+ - "frame.int_retries,"+ - "frame.str_state,"+ - "frame.str_host,"+ - "frame.int_cores,"+ - "frame.int_gpus,"+ - "frame.int_mem_max_used," + - "frame.int_mem_used, " + - "frame.int_mem_reserved, " + - "frame.int_gpu_mem_reserved, " + - "frame.str_checkpoint_state,"+ - "frame.int_checkpoint_count,"+ - "frame.int_total_past_core_time,"+ - "frame.int_total_past_gpu_time,"+ - "layer.str_name AS layer_name," + - "job.str_name AS job_name,"+ - "frame_state_display_overrides.* " + - "FROM "+ - "job, " + - "layer, "+ - "frame " + - "LEFT JOIN frame_state_display_overrides ON " + - "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + - "frame.str_state = frame_state_display_overrides.str_frame_state) " + - "WHERE " + - "frame.pk_layer = layer.pk_layer "+ - "AND "+ - "frame.pk_job= job.pk_job"; - - private static final String FIND_FRAME = GET_FRAME + " " + - "AND " + - "job.str_state='PENDING' " + - "AND " + - "job.str_name=? " + - "AND " + - "layer.str_name=? " + - "AND " + - "frame.int_number=?"; - - private static final String GET_PROC = - "SELECT " + - "host.str_name AS host_name, " + - "job.str_name AS job_name, " + - "job.str_log_dir, " + - "folder.str_name as folder_name, " + - "show.str_name AS show_name, " + - "frame.str_name AS frame_name, " + - "layer.str_services, " + - "proc.pk_proc, " + - "proc.pk_host, " + - "proc.int_cores_reserved, " + - "proc.int_mem_reserved, " + - "proc.int_mem_used, " + - "proc.int_mem_max_used, " + - "proc.int_gpus_reserved, " + - "proc.int_gpu_mem_reserved, " + - "proc.int_gpu_mem_used, " + - "proc.int_gpu_mem_max_used, " + - "proc.ts_ping, " + - "proc.ts_booked, " + - "proc.ts_dispatched, " + - "proc.b_unbooked, " + - "proc.bytea_children, " + - "redirect.str_name AS str_redirect " + - "FROM proc " + - "JOIN host ON proc.pk_host = host.pk_host " + - "JOIN alloc ON host.pk_alloc = alloc.pk_alloc " + - "JOIN frame ON proc.pk_frame = frame.pk_frame " + - "JOIN layer ON proc.pk_layer = layer.pk_layer " + - "JOIN job ON proc.pk_job = job.pk_job " + - "JOIN folder ON job.pk_folder = folder.pk_folder " + - "JOIN show ON proc.pk_show = show.pk_show " + - "LEFT JOIN redirect ON proc.pk_proc = redirect.pk_proc " + - "WHERE true "; - - private static final String GET_JOB_COMMENTS = - "SELECT " + - "* " + - "FROM " + - "comments " + - "WHERE " + - "pk_job=? 
" + - "ORDER BY " + - "ts_created ASC"; - - private static final String GET_UPDATED_FRAME = - "SELECT " + - "frame.pk_frame, " + - "frame.int_exit_status,"+ - "frame.ts_started,"+ - "frame.ts_stopped,"+ - "frame.int_retries,"+ - "frame.str_state,"+ - "frame.str_host,"+ - "frame.int_cores,"+ - "frame.int_gpus,"+ - "frame.ts_llu,"+ - "COALESCE(proc.int_mem_max_used, frame.int_mem_max_used) AS int_mem_max_used," + - "COALESCE(proc.int_mem_used, frame.int_mem_used) AS int_mem_used," + - "frame_state_display_overrides.* " + - "FROM "+ - "job, " + - "layer,"+ - "frame " + - "LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + - "LEFT JOIN frame_state_display_overrides ON " + - "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + - "frame.str_state = frame_state_display_overrides.str_frame_state) " + - "WHERE " + - "frame.pk_layer = layer.pk_layer "+ - "AND "+ - "frame.pk_job= job.pk_job"; - - private static final String GET_ALLOCATION = - "SELECT " + - "alloc.pk_alloc, " + - "alloc.str_name, " + - "alloc.str_tag, " + - "alloc.b_billable,"+ - "facility.str_name AS facility_name,"+ - "vs_alloc_usage.int_cores,"+ - "vs_alloc_usage.int_idle_cores,"+ - "vs_alloc_usage.int_running_cores,"+ - "vs_alloc_usage.int_available_cores,"+ - "vs_alloc_usage.int_locked_cores,"+ - "vs_alloc_usage.int_gpus,"+ - "vs_alloc_usage.int_idle_gpus,"+ - "vs_alloc_usage.int_running_gpus,"+ - "vs_alloc_usage.int_available_gpus,"+ - "vs_alloc_usage.int_locked_gpus,"+ - "vs_alloc_usage.int_hosts,"+ - "vs_alloc_usage.int_locked_hosts,"+ - "vs_alloc_usage.int_down_hosts "+ - "FROM " + - "alloc, " + - "facility, " + - "vs_alloc_usage " + - "WHERE " + - "alloc.pk_alloc = vs_alloc_usage.pk_alloc " + - "AND " + - "alloc.pk_facility = facility.pk_facility " + - "AND " + - "alloc.b_enabled = true"; - - - private static final String GET_MATCHER = - "SELECT " + - "filter.pk_show," + - "matcher.* " + - "FROM " + - "filter,"+ - "matcher " + - "WHERE " + - "filter.pk_filter = matcher.pk_filter"; - - private static final String GET_DEPARTMENT = - "SELECT " + - "dept.str_name AS str_dept," + - "show.str_name || '.' || dept.str_name AS str_name, " + - "pk_point,"+ - "str_ti_task,"+ - "int_cores,"+ - "int_min_cores,"+ - "int_gpus,"+ - "int_min_gpus,"+ - "b_managed " + - "FROM " + - "point," + - "dept,"+ - "show " + - "WHERE " + - "point.pk_show = show.pk_show " + - "AND " + - "point.pk_dept = dept.pk_dept " + - "AND " + - "point.pk_show = ? " + - "AND " + - "dept.str_name = ?"; - - private static final String GET_DEPARTMENTS = - "SELECT " + - "dept.str_name AS str_dept," + - "show.str_name || '.' || dept.str_name AS str_name, " + - "pk_point,"+ - "str_ti_task,"+ - "int_cores,"+ - "int_min_cores,"+ - "int_gpus,"+ - "int_min_gpus,"+ - "b_managed " + - "FROM " + - "point," + - "dept,"+ - "show " + - "WHERE " + - "point.pk_show = show.pk_show " + - "AND " + - "point.pk_dept = dept.pk_dept " + - "AND " + - "point.pk_show = ? 
"; - - private static final String QUERY_FOR_OWNER = - "SELECT " + - "owner.pk_owner," + - "owner.str_username,"+ - "show.str_name AS str_show, " + - "(SELECT COUNT(1) FROM deed WHERE deed.pk_owner = owner.pk_owner) " + - " AS host_count " + - "FROM " + - "owner, " + - "show " + - "WHERE " + - "owner.pk_show = show.pk_show"; - - private static final String QUERY_FOR_RENDER_PART = - "SELECT " + - "host_local.pk_host_local,"+ - "host_local.int_cores_idle,"+ - "host_local.int_cores_max,"+ - "host_local.int_gpus_idle,"+ - "host_local.int_gpus_max,"+ - "host_local.int_threads,"+ - "host_local.int_mem_idle,"+ - "host_local.int_mem_max,"+ - "host_local.int_gpu_mem_idle,"+ - "host_local.int_gpu_mem_max,"+ - "host_local.str_type,"+ - "(SELECT str_name FROM host WHERE host.pk_host = host_local.pk_host) " + - "AS str_host_name,"+ - "(SELECT str_name FROM job WHERE job.pk_job = host_local.pk_job) " + - "AS str_job_name,"+ - "(SELECT str_name FROM layer WHERE layer.pk_layer = host_local.pk_layer) " + - "AS str_layer_name,"+ - "(SELECT str_name FROM frame WHERE frame.pk_frame = host_local.pk_frame) " + - "AS str_frame_name " + - "FROM " + - "host_local "; - - private static final String QUERY_FOR_FACILITY = - "SELECT " + - "facility.pk_facility," + - "facility.str_name " + - "FROM " + - "facility "; - - private static final String QUERY_FOR_LIMIT = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value, " + - "SUM(layer_stat.int_running_count) AS int_current_running " + - "FROM " + - "limit_record " + - "LEFT JOIN " + - "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN " + - "layer ON layer.pk_layer = layer_limit.pk_layer " + - "LEFT JOIN " + - "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; - - private static final String GET_LIMIT_FROM_LAYER_ID = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value, " + - "SUM(layer_stat.int_running_count) AS int_current_running " + - "FROM " + - "limit_record " + - "LEFT JOIN " + - "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN " + - "layer ON layer.pk_layer = layer_limit.pk_layer " + - "LEFT JOIN " + - "layer_stat ON layer_stat.pk_layer = layer.pk_layer " + - "WHERE " + - "layer_limit.pk_layer = ? 
" + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - - public static final String GET_GROUPS = - "SELECT " + - "show.pk_show, " + - "show.str_name AS str_show," + - "dept.str_name AS str_dept," + - "folder.pk_folder," + - "folder.pk_parent_folder," + - "folder.str_name AS group_name," + - "folder.int_job_priority,"+ - "folder.int_job_min_cores," + - "folder.int_job_max_cores," + - "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores,"+ - "folder.int_job_min_gpus," + - "folder.int_job_max_gpus," + - "folder_resource.int_min_gpus,"+ - "folder_resource.int_max_gpus,"+ - "folder.b_default, " + - "folder_level.int_level, " + - "c.int_waiting_count, " + - "c.int_depend_count, " + - "c.int_running_count,"+ - "c.int_dead_count,"+ - "c.int_job_count,"+ - "c.int_cores," + - "c.int_gpus " + - "FROM " + - "folder, " + - "folder_level," + - "folder_resource, "+ - "vs_folder_counts c, " + - "show," + - "dept " + - "WHERE " + - "show.pk_show = folder.pk_show "+ - "AND " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "folder.pk_folder = c.pk_folder " + - "AND " + - "folder.pk_dept = dept.pk_dept "; - - private static final String GET_ACTION = - "SELECT " + - "filter.pk_show," + - "action.* " + - "FROM " + - "filter,"+ - "action " + - "WHERE " + - "filter.pk_filter = action.pk_filter "; - - private static final String GET_JOB = - "SELECT " + - "job.pk_job,"+ - "job.str_log_dir," + - "job_resource.int_max_cores," + - "job_resource.int_min_cores," + - "job_resource.int_max_gpus," + - "job_resource.int_min_gpus," + - "job.str_name," + - "job.str_shot,"+ - "job.str_state,"+ - "job.int_uid,"+ - "job.str_user,"+ - "job.b_paused,"+ - "job.ts_started,"+ - "job.ts_stopped,"+ - "job.b_comment,"+ - "job.b_autoeat,"+ - "job.str_os,"+ - "job_resource.int_priority,"+ - "job.int_frame_count, " + - "job.int_layer_count, " + - "show.str_name as str_show," + - "show.pk_show as id_show,"+ - "facility.str_name AS facility_name,"+ - "folder.str_name AS group_name,"+ - "job_stat.int_waiting_count, "+ - "job_stat.int_running_count, "+ - "job_stat.int_dead_count, " + - "job_stat.int_eaten_count," + - "job_stat.int_depend_count, "+ - "job_stat.int_succeeded_count, "+ - "job_usage.int_core_time_success, "+ - "job_usage.int_core_time_fail, " + - "job_usage.int_gpu_time_success, "+ - "job_usage.int_gpu_time_fail, " + - "job_usage.int_frame_success_count, "+ - "job_usage.int_frame_fail_count, "+ - "job_usage.int_clock_time_high,"+ - "job_usage.int_clock_time_success,"+ - "job_mem.int_max_rss,"+ - "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores," + - "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus " + - "FROM " + - "job,"+ - "folder,"+ - "show," + - "facility,"+ - "job_stat," + - "job_resource, " + - "job_mem, " + - "job_usage " + - "WHERE " + - "job.pk_show = show.pk_show " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "job.pk_facility = facility.pk_facility " + - "AND " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.pk_job = job_mem.pk_job " + - "AND " + - "job.pk_job = job_usage.pk_job "; - - private static final String GET_LAYER = - "SELECT " + - "layer.*," + - "layer_stat.int_total_count," + - "layer_stat.int_waiting_count," + - "layer_stat.int_running_count," + - "layer_stat.int_dead_count," + - "layer_stat.int_depend_count," + - 
"layer_stat.int_eaten_count," + - "layer_stat.int_succeeded_count," + - "layer_usage.int_core_time_success," + - "layer_usage.int_core_time_fail, "+ - "layer_usage.int_gpu_time_success," + - "layer_usage.int_gpu_time_fail, "+ - "layer_usage.int_frame_success_count, "+ - "layer_usage.int_frame_fail_count, "+ - "layer_usage.int_clock_time_low, "+ - "layer_usage.int_clock_time_high," + - "layer_usage.int_clock_time_success," + - "layer_usage.int_clock_time_fail," + - "layer_mem.int_max_rss,"+ - "layer_resource.int_cores," + - "layer_resource.int_gpus " + - "FROM " + - "layer, " + - "job," + - "layer_stat, " + - "layer_resource, " + - "layer_usage, " + - "layer_mem " + - "WHERE " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.pk_layer = layer_stat.pk_layer "+ - "AND " + - "layer.pk_layer = layer_resource.pk_layer " + - "AND " + - "layer.pk_layer = layer_usage.pk_layer " + - "AND " + - "layer.pk_layer = layer_mem.pk_layer"; - - private static final String GET_LAYER_WITH_LIMITS = - "SELECT " + - "layer.*, " + - "layer_stat.int_total_count, " + - "layer_stat.int_waiting_count, " + - "layer_stat.int_running_count, " + - "layer_stat.int_dead_count, " + - "layer_stat.int_depend_count, " + - "layer_stat.int_eaten_count, " + - "layer_stat.int_succeeded_count, " + - "layer_usage.int_core_time_success, " + - "layer_usage.int_core_time_fail, " + - "layer_usage.int_gpu_time_success, " + - "layer_usage.int_gpu_time_fail, " + - "layer_usage.int_frame_success_count, " + - "layer_usage.int_frame_fail_count, " + - "layer_usage.int_clock_time_low, " + - "layer_usage.int_clock_time_high, " + - "layer_usage.int_clock_time_success, " + - "layer_usage.int_clock_time_fail, " + - "layer_mem.int_max_rss, " + - "layer_resource.int_cores, " + - "layer_resource.int_gpus, " + - "limit_names.str_limit_names " + - "FROM " + - "layer " + - "JOIN " + - "job ON layer.pk_job = job.pk_job " + - "JOIN " + - "layer_stat ON layer.pk_layer = layer_stat.pk_layer " + - "JOIN " + - "layer_resource ON layer.pk_layer = layer_resource.pk_layer " + - "JOIN " + - "layer_usage ON layer.pk_layer = layer_usage.pk_layer " + - "JOIN " + - "layer_mem ON layer.pk_layer = layer_mem.pk_layer " + - "LEFT JOIN " + - "(" + - "SELECT " + - "layer_limit.pk_layer, " + - "string_agg(limit_record.str_name, ',') AS str_limit_names " + - "FROM " + - "limit_record, " + - "layer_limit " + - "WHERE " + - "layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "GROUP BY " + - "layer_limit.pk_layer) AS limit_names " + - "ON layer.pk_layer = limit_names.pk_layer "; - - private static final String GET_LIMIT_NAMES = - "SELECT " + - "limit_record.str_name " + - "FROM " + - "layer_limit, " + - "limit_record " + - "WHERE " + - "layer_limit.pk_layer = ? 
" + - "AND " + - "limit_record.pk_limit_record = layer_limit.pk_limit_record "; - - private static final String GET_SHOW = - "SELECT " + - "show.pk_show," + - "show.str_name," + - "show.b_paused," + - "show.int_default_min_cores," + - "show.int_default_max_cores," + - "show.int_default_min_gpus," + - "show.int_default_max_gpus," + - "show.b_booking_enabled," + - "show.b_dispatch_enabled," + - "show.b_active," + - "show.str_comment_email," + - "show_stats.int_frame_insert_count," + - "show_stats.int_job_insert_count," + - "show_stats.int_frame_success_count," + - "show_stats.int_frame_fail_count," + - "COALESCE(vs_show_stat.int_pending_count,0) AS int_pending_count," + - "COALESCE(vs_show_stat.int_running_count,0) AS int_running_count," + - "COALESCE(vs_show_stat.int_dead_count,0) AS int_dead_count," + - "COALESCE(vs_show_resource.int_cores,0) AS int_cores, " + - "COALESCE(vs_show_resource.int_gpus,0) AS int_gpus, " + - "COALESCE(vs_show_stat.int_job_count,0) AS int_job_count " + - "FROM " + - "show " + - "JOIN show_stats ON (show.pk_show = show_stats.pk_show) " + - "LEFT JOIN vs_show_stat ON (vs_show_stat.pk_show = show.pk_show) " + - "LEFT JOIN vs_show_resource ON (vs_show_resource.pk_show=show.pk_show) " + - "WHERE " + - "1 = 1 "; - - private static final String GET_SERVICE = - "SELECT " + - "service.pk_service,"+ - "service.str_name," + - "service.b_threadable," + - "service.int_cores_min," + - "service.int_cores_max," + - "service.int_mem_min," + - "service.int_gpus_min," + - "service.int_gpus_max," + - "service.int_gpu_mem_min," + - "service.str_tags," + - "service.int_timeout," + - "service.int_timeout_llu," + - "service.int_min_memory_increase " + - "FROM "+ - "service "; - - private static final String GET_SERVICE_OVERRIDE = - "SELECT " + - "show_service.pk_show_service,"+ - "show_service.str_name," + - "show_service.b_threadable," + - "show_service.int_cores_min," + - "show_service.int_cores_max," + - "show_service.int_mem_min," + - "show_service.int_gpus_min," + - "show_service.int_gpus_max," + - "show_service.int_gpu_mem_min," + - "show_service.str_tags," + - "show_service.int_timeout," + - "show_service.int_timeout_llu," + - "show_service.int_min_memory_increase " + - "FROM "+ - "show_service, " + - "show " + - "WHERE " + - "show_service.pk_show = show.pk_show "; - - private static final String GET_TASK = - "SELECT " + - "task.pk_task," + - "task.str_shot,"+ - "task.int_min_cores + task.int_adjust_cores AS int_min_cores, "+ - "task.int_adjust_cores, " + - "task.int_min_gpus + task.int_adjust_gpus AS int_min_gpus, "+ - "task.int_adjust_gpus, " + - "dept.str_name AS str_dept "+ - "FROM " + - "task,"+ - "dept, " + - "point "+ - "WHERE " + - "task.pk_point = point.pk_point " + - "AND " + - "point.pk_dept = dept.pk_dept "; - - private static final String GET_HOST = - "SELECT " + - "host.pk_host, "+ - "host.str_name AS host_name," + - "host_stat.str_state AS host_state,"+ - "host.b_nimby,"+ - "host_stat.ts_booted,"+ - "host_stat.ts_ping,"+ - "host.int_cores,"+ - "host.int_cores_idle,"+ - "host.int_mem,"+ - "host.int_mem_idle,"+ - "host.int_gpus,"+ - "host.int_gpus_idle,"+ - "host.int_gpu_mem,"+ - "host.int_gpu_mem_idle,"+ - "host.str_tags,"+ - "host.str_lock_state,"+ - "host.b_comment,"+ - "host.int_thread_mode,"+ - "host_stat.str_os,"+ - "host_stat.int_mem_total,"+ - "host_stat.int_mem_free,"+ - "host_stat.int_swap_total,"+ - "host_stat.int_swap_free,"+ - "host_stat.int_mcp_total,"+ - "host_stat.int_mcp_free,"+ - "host_stat.int_gpu_mem_total,"+ - "host_stat.int_gpu_mem_free,"+ - 
"host_stat.int_load, " + - "alloc.str_name AS alloc_name " + - "FROM " + - "alloc," + - "facility, "+ - "host_stat,"+ - "host "+ - "WHERE " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "facility.pk_facility = alloc.pk_facility " + - "AND "+ - "host.pk_host = host_stat.pk_host "; - - private static final String GET_DEPEND = - "SELECT " + - "depend.pk_depend, "+ - "depend.str_type, "+ - "depend.b_active, "+ - "depend.b_any, "+ - "depend.str_target, "+ - "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_on) AS depend_on_job, "+ - "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_er) AS depend_er_job, "+ - "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_on) AS depend_on_layer, "+ - "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_er) AS depend_er_layer, "+ - "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_on) AS depend_on_frame, "+ - "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_er) AS depend_er_frame "+ - "FROM " + - "depend "; - - private static final String GET_SUBSCRIPTION = - "SELECT " + - "subscription.pk_subscription, " + - "(alloc.str_name || '.' || show.str_name) AS name, "+ - "subscription.int_burst, " + - "subscription.int_size, " + - "subscription.int_cores, " + - "subscription.int_gpus, " + - "show.str_name AS show_name, " + - "alloc.str_name AS alloc_name, " + - "facility.str_name AS facility_name " + - "FROM "+ - "show, " + - "alloc, " + - "facility,"+ - "subscription " + - "WHERE " + - "subscription.pk_show = show.pk_show " + - "AND " + - "subscription.pk_alloc = alloc.pk_alloc " + - "AND " + - "alloc.pk_facility = facility.pk_facility "; - - private static final String GET_PENDING_JOBS = - GET_JOB + - "AND " + - "job.str_state = 'PENDING' "; - - private static final String GET_FRAMES_CRITERIA = - - "SELECT " + - "frame.pk_frame, " + - "frame.int_exit_status,"+ - "frame.str_name,"+ - "frame.int_number,"+ - "frame.int_dispatch_order,"+ - "frame.ts_started,"+ - "frame.ts_stopped,"+ - "frame.ts_llu,"+ - "frame.int_retries,"+ - "frame.str_state,"+ - "frame.str_host,"+ - "frame.int_cores,"+ - "frame.int_mem_max_used," + - "frame.int_mem_used, " + - "frame.int_mem_reserved, " + - "frame.int_gpus,"+ - "frame.int_gpu_mem_max_used, " + - "frame.int_gpu_mem_used, " + - "frame.int_gpu_mem_reserved, " + - "frame.str_checkpoint_state,"+ - "frame.int_checkpoint_count,"+ - "frame.int_total_past_core_time,"+ - "frame.int_total_past_gpu_time,"+ - "layer.str_name AS layer_name," + - "job.str_name AS job_name, "+ - "frame_state_display_overrides.*, "+ - "ROW_NUMBER() OVER " + - "(ORDER BY frame.int_dispatch_order ASC, layer.int_dispatch_order ASC) AS row_number " + - "FROM "+ - "job, " + - "layer,"+ - "frame " + - "LEFT JOIN frame_state_display_overrides ON " + - "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + - "frame.str_state = frame_state_display_overrides.str_frame_state) " + - "WHERE " + - "frame.pk_layer = layer.pk_layer "+ - "AND "+ - "frame.pk_job= job.pk_job "; - - private static final String QUERY_FOR_DEED = - "SELECT " + - "host.str_name AS str_host,"+ - "show.str_name AS str_show,"+ - "owner.str_username," + - "deed.pk_deed " + - "FROM " + - "deed,"+ - "owner,"+ - "host,"+ - "show "+ - "WHERE " + - "deed.pk_host = host.pk_host " + - "AND " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "owner.pk_show = show.pk_show "; - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public 
void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } - - public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; - } + }; + + public static final RowMapper SHOW_MAPPER = new RowMapper() { + public Show mapRow(ResultSet rs, int rowNum) throws SQLException { + ShowStats stats = ShowStats.newBuilder().setPendingFrames(rs.getInt("int_pending_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setDeadFrames(rs.getInt("int_dead_count")) + .setCreatedFrameCount(rs.getLong("int_frame_insert_count")) + .setCreatedJobCount(rs.getLong("int_job_insert_count")) + .setRenderedFrameCount(rs.getLong("int_frame_success_count")) + .setFailedFrameCount(rs.getLong("int_frame_fail_count")) + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).setPendingJobs(rs.getInt("int_job_count")) + .build(); + return Show.newBuilder().setId(SqlUtil.getString(rs, "pk_show")) + .setName(SqlUtil.getString(rs, "str_name")).setActive(rs.getBoolean("b_active")) + .setDefaultMaxCores(Convert.coreUnitsToCores(rs.getInt("int_default_max_cores"))) + .setDefaultMinCores(Convert.coreUnitsToCores(rs.getInt("int_default_min_cores"))) + .setDefaultMaxGpus(rs.getInt("int_default_max_gpus")) + .setDefaultMinGpus(rs.getInt("int_default_min_gpus")) + .setBookingEnabled(rs.getBoolean("b_booking_enabled")) + .setDispatchEnabled(rs.getBoolean("b_dispatch_enabled")) + .setCommentEmail(SqlUtil.getString(rs, "str_comment_email")).setShowStats(stats).build(); + } + }; + /* + * Queries + */ + + private static final String GET_JOB_NAMES = "SELECT " + "job.str_name " + "FROM " + "job," + + "show " + "WHERE " + "job.pk_show = show.pk_show " + "AND " + "job.str_state = 'PENDING' "; + + private static final String GET_HOST_COMMENTS = "SELECT " + "* " + "FROM " + "comments " + + "WHERE " + "pk_host=? " + "ORDER BY " + "ts_created ASC"; + + private static final String GET_FILTER = "SELECT " + "filter.* " + "FROM " + "filter," + "show " + + "WHERE " + "filter.pk_show = show.pk_show"; + + private static final String GET_FRAME = "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + + "frame.str_name," + "frame.int_number," + "frame.int_dispatch_order," + "frame.ts_started," + + "frame.ts_stopped," + "frame.ts_llu," + "frame.int_retries," + "frame.str_state," + + "frame.str_host," + "frame.int_cores," + "frame.int_gpus," + "frame.int_mem_max_used," + + "frame.int_mem_used, " + "frame.int_mem_reserved, " + "frame.int_gpu_mem_reserved, " + + "frame.str_checkpoint_state," + "frame.int_checkpoint_count," + + "frame.int_total_past_core_time," + "frame.int_total_past_gpu_time," + + "layer.str_name AS layer_name," + "job.str_name AS job_name," + + "frame_state_display_overrides.* " + "FROM " + "job, " + "layer, " + "frame " + + "LEFT JOIN frame_state_display_overrides ON " + + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job"; + + private static final String FIND_FRAME = GET_FRAME + " " + "AND " + "job.str_state='PENDING' " + + "AND " + "job.str_name=? " + "AND " + "layer.str_name=? 
" + "AND " + "frame.int_number=?"; + + private static final String GET_PROC = "SELECT " + "host.str_name AS host_name, " + + "job.str_name AS job_name, " + "job.str_log_dir, " + "folder.str_name as folder_name, " + + "show.str_name AS show_name, " + "frame.str_name AS frame_name, " + "layer.str_services, " + + "proc.pk_proc, " + "proc.pk_host, " + "proc.int_cores_reserved, " + + "proc.int_mem_reserved, " + "proc.int_mem_used, " + "proc.int_mem_max_used, " + + "proc.int_gpus_reserved, " + "proc.int_gpu_mem_reserved, " + "proc.int_gpu_mem_used, " + + "proc.int_gpu_mem_max_used, " + "proc.ts_ping, " + "proc.ts_booked, " + + "proc.ts_dispatched, " + "proc.b_unbooked, " + "proc.bytea_children, " + + "redirect.str_name AS str_redirect " + "FROM proc " + + "JOIN host ON proc.pk_host = host.pk_host " + + "JOIN alloc ON host.pk_alloc = alloc.pk_alloc " + + "JOIN frame ON proc.pk_frame = frame.pk_frame " + + "JOIN layer ON proc.pk_layer = layer.pk_layer " + "JOIN job ON proc.pk_job = job.pk_job " + + "JOIN folder ON job.pk_folder = folder.pk_folder " + + "JOIN show ON proc.pk_show = show.pk_show " + + "LEFT JOIN redirect ON proc.pk_proc = redirect.pk_proc " + "WHERE true "; + + private static final String GET_JOB_COMMENTS = "SELECT " + "* " + "FROM " + "comments " + "WHERE " + + "pk_job=? " + "ORDER BY " + "ts_created ASC"; + + private static final String GET_UPDATED_FRAME = + "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + "frame.ts_started," + + "frame.ts_stopped," + "frame.int_retries," + "frame.str_state," + "frame.str_host," + + "frame.int_cores," + "frame.int_gpus," + "frame.ts_llu," + + "COALESCE(proc.int_mem_max_used, frame.int_mem_max_used) AS int_mem_max_used," + + "COALESCE(proc.int_mem_used, frame.int_mem_used) AS int_mem_used," + + "frame_state_display_overrides.* " + "FROM " + "job, " + "layer," + "frame " + + "LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + + "LEFT JOIN frame_state_display_overrides ON " + + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job"; + + private static final String GET_ALLOCATION = "SELECT " + "alloc.pk_alloc, " + "alloc.str_name, " + + "alloc.str_tag, " + "alloc.b_billable," + "facility.str_name AS facility_name," + + "vs_alloc_usage.int_cores," + "vs_alloc_usage.int_idle_cores," + + "vs_alloc_usage.int_running_cores," + "vs_alloc_usage.int_available_cores," + + "vs_alloc_usage.int_locked_cores," + "vs_alloc_usage.int_gpus," + + "vs_alloc_usage.int_idle_gpus," + "vs_alloc_usage.int_running_gpus," + + "vs_alloc_usage.int_available_gpus," + "vs_alloc_usage.int_locked_gpus," + + "vs_alloc_usage.int_hosts," + "vs_alloc_usage.int_locked_hosts," + + "vs_alloc_usage.int_down_hosts " + "FROM " + "alloc, " + "facility, " + "vs_alloc_usage " + + "WHERE " + "alloc.pk_alloc = vs_alloc_usage.pk_alloc " + "AND " + + "alloc.pk_facility = facility.pk_facility " + "AND " + "alloc.b_enabled = true"; + + private static final String GET_MATCHER = "SELECT " + "filter.pk_show," + "matcher.* " + "FROM " + + "filter," + "matcher " + "WHERE " + "filter.pk_filter = matcher.pk_filter"; + + private static final String GET_DEPARTMENT = "SELECT " + "dept.str_name AS str_dept," + + "show.str_name || '.' 
|| dept.str_name AS str_name, " + "pk_point," + "str_ti_task," + + "int_cores," + "int_min_cores," + "int_gpus," + "int_min_gpus," + "b_managed " + "FROM " + + "point," + "dept," + "show " + "WHERE " + "point.pk_show = show.pk_show " + "AND " + + "point.pk_dept = dept.pk_dept " + "AND " + "point.pk_show = ? " + "AND " + + "dept.str_name = ?"; + + private static final String GET_DEPARTMENTS = "SELECT " + "dept.str_name AS str_dept," + + "show.str_name || '.' || dept.str_name AS str_name, " + "pk_point," + "str_ti_task," + + "int_cores," + "int_min_cores," + "int_gpus," + "int_min_gpus," + "b_managed " + "FROM " + + "point," + "dept," + "show " + "WHERE " + "point.pk_show = show.pk_show " + "AND " + + "point.pk_dept = dept.pk_dept " + "AND " + "point.pk_show = ? "; + + private static final String QUERY_FOR_OWNER = + "SELECT " + "owner.pk_owner," + "owner.str_username," + "show.str_name AS str_show, " + + "(SELECT COUNT(1) FROM deed WHERE deed.pk_owner = owner.pk_owner) " + " AS host_count " + + "FROM " + "owner, " + "show " + "WHERE " + "owner.pk_show = show.pk_show"; + + private static final String QUERY_FOR_RENDER_PART = "SELECT " + "host_local.pk_host_local," + + "host_local.int_cores_idle," + "host_local.int_cores_max," + "host_local.int_gpus_idle," + + "host_local.int_gpus_max," + "host_local.int_threads," + "host_local.int_mem_idle," + + "host_local.int_mem_max," + "host_local.int_gpu_mem_idle," + "host_local.int_gpu_mem_max," + + "host_local.str_type," + + "(SELECT str_name FROM host WHERE host.pk_host = host_local.pk_host) " + "AS str_host_name," + + "(SELECT str_name FROM job WHERE job.pk_job = host_local.pk_job) " + "AS str_job_name," + + "(SELECT str_name FROM layer WHERE layer.pk_layer = host_local.pk_layer) " + + "AS str_layer_name," + + "(SELECT str_name FROM frame WHERE frame.pk_frame = host_local.pk_frame) " + + "AS str_frame_name " + "FROM " + "host_local "; + + private static final String QUERY_FOR_FACILITY = + "SELECT " + "facility.pk_facility," + "facility.str_name " + "FROM " + "facility "; + + private static final String QUERY_FOR_LIMIT = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value, " + + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + "limit_record " + + "LEFT JOIN " + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " + + "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; + + private static final String GET_LIMIT_FROM_LAYER_ID = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value, " + + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + "limit_record " + + "LEFT JOIN " + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " + + "layer_stat ON layer_stat.pk_layer = layer.pk_layer " + "WHERE " + + "layer_limit.pk_layer = ? 
" + "GROUP BY " + "limit_record.str_name, " + + "limit_record.pk_limit_record, " + "limit_record.int_max_value"; + + public static final String GET_GROUPS = "SELECT " + "show.pk_show, " + + "show.str_name AS str_show," + "dept.str_name AS str_dept," + "folder.pk_folder," + + "folder.pk_parent_folder," + "folder.str_name AS group_name," + "folder.int_job_priority," + + "folder.int_job_min_cores," + "folder.int_job_max_cores," + "folder_resource.int_min_cores," + + "folder_resource.int_max_cores," + "folder.int_job_min_gpus," + "folder.int_job_max_gpus," + + "folder_resource.int_min_gpus," + "folder_resource.int_max_gpus," + "folder.b_default, " + + "folder_level.int_level, " + "c.int_waiting_count, " + "c.int_depend_count, " + + "c.int_running_count," + "c.int_dead_count," + "c.int_job_count," + "c.int_cores," + + "c.int_gpus " + "FROM " + "folder, " + "folder_level," + "folder_resource, " + + "vs_folder_counts c, " + "show," + "dept " + "WHERE " + "show.pk_show = folder.pk_show " + + "AND " + "folder.pk_folder = folder_level.pk_folder " + "AND " + + "folder.pk_folder = folder_resource.pk_folder " + "AND " + "folder.pk_folder = c.pk_folder " + + "AND " + "folder.pk_dept = dept.pk_dept "; + + private static final String GET_ACTION = "SELECT " + "filter.pk_show," + "action.* " + "FROM " + + "filter," + "action " + "WHERE " + "filter.pk_filter = action.pk_filter "; + + private static final String GET_JOB = "SELECT " + "job.pk_job," + "job.str_log_dir," + + "job_resource.int_max_cores," + "job_resource.int_min_cores," + "job_resource.int_max_gpus," + + "job_resource.int_min_gpus," + "job.str_name," + "job.str_shot," + "job.str_state," + + "job.int_uid," + "job.str_user," + "job.b_paused," + "job.ts_started," + "job.ts_stopped," + + "job.b_comment," + "job.b_autoeat," + "job.str_os," + "job_resource.int_priority," + + "job.int_frame_count, " + "job.int_layer_count, " + "show.str_name as str_show," + + "show.pk_show as id_show," + "facility.str_name AS facility_name," + + "folder.str_name AS group_name," + "job_stat.int_waiting_count, " + + "job_stat.int_running_count, " + "job_stat.int_dead_count, " + "job_stat.int_eaten_count," + + "job_stat.int_depend_count, " + "job_stat.int_succeeded_count, " + + "job_usage.int_core_time_success, " + "job_usage.int_core_time_fail, " + + "job_usage.int_gpu_time_success, " + "job_usage.int_gpu_time_fail, " + + "job_usage.int_frame_success_count, " + "job_usage.int_frame_fail_count, " + + "job_usage.int_clock_time_high," + "job_usage.int_clock_time_success," + + "job_mem.int_max_rss," + + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores," + + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus " + "FROM " + "job," + + "folder," + "show," + "facility," + "job_stat," + "job_resource, " + "job_mem, " + + "job_usage " + "WHERE " + "job.pk_show = show.pk_show " + "AND " + + "job.pk_folder = folder.pk_folder " + "AND " + "job.pk_facility = facility.pk_facility " + + "AND " + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job = job_resource.pk_job " + + "AND " + "job.pk_job = job_mem.pk_job " + "AND " + "job.pk_job = job_usage.pk_job "; + + private static final String GET_LAYER = "SELECT " + "layer.*," + "layer_stat.int_total_count," + + "layer_stat.int_waiting_count," + "layer_stat.int_running_count," + + "layer_stat.int_dead_count," + "layer_stat.int_depend_count," + + "layer_stat.int_eaten_count," + "layer_stat.int_succeeded_count," + + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail, " + + 
"layer_usage.int_gpu_time_success," + "layer_usage.int_gpu_time_fail, " + + "layer_usage.int_frame_success_count, " + "layer_usage.int_frame_fail_count, " + + "layer_usage.int_clock_time_low, " + "layer_usage.int_clock_time_high," + + "layer_usage.int_clock_time_success," + "layer_usage.int_clock_time_fail," + + "layer_mem.int_max_rss," + "layer_resource.int_cores," + "layer_resource.int_gpus " + + "FROM " + "layer, " + "job," + "layer_stat, " + "layer_resource, " + "layer_usage, " + + "layer_mem " + "WHERE " + "layer.pk_job = job.pk_job " + "AND " + + "layer.pk_layer = layer_stat.pk_layer " + "AND " + + "layer.pk_layer = layer_resource.pk_layer " + "AND " + + "layer.pk_layer = layer_usage.pk_layer " + "AND " + "layer.pk_layer = layer_mem.pk_layer"; + + private static final String GET_LAYER_WITH_LIMITS = "SELECT " + "layer.*, " + + "layer_stat.int_total_count, " + "layer_stat.int_waiting_count, " + + "layer_stat.int_running_count, " + "layer_stat.int_dead_count, " + + "layer_stat.int_depend_count, " + "layer_stat.int_eaten_count, " + + "layer_stat.int_succeeded_count, " + "layer_usage.int_core_time_success, " + + "layer_usage.int_core_time_fail, " + "layer_usage.int_gpu_time_success, " + + "layer_usage.int_gpu_time_fail, " + "layer_usage.int_frame_success_count, " + + "layer_usage.int_frame_fail_count, " + "layer_usage.int_clock_time_low, " + + "layer_usage.int_clock_time_high, " + "layer_usage.int_clock_time_success, " + + "layer_usage.int_clock_time_fail, " + "layer_mem.int_max_rss, " + + "layer_resource.int_cores, " + "layer_resource.int_gpus, " + "limit_names.str_limit_names " + + "FROM " + "layer " + "JOIN " + "job ON layer.pk_job = job.pk_job " + "JOIN " + + "layer_stat ON layer.pk_layer = layer_stat.pk_layer " + "JOIN " + + "layer_resource ON layer.pk_layer = layer_resource.pk_layer " + "JOIN " + + "layer_usage ON layer.pk_layer = layer_usage.pk_layer " + "JOIN " + + "layer_mem ON layer.pk_layer = layer_mem.pk_layer " + "LEFT JOIN " + "(" + "SELECT " + + "layer_limit.pk_layer, " + "string_agg(limit_record.str_name, ',') AS str_limit_names " + + "FROM " + "limit_record, " + "layer_limit " + "WHERE " + + "layer_limit.pk_limit_record = limit_record.pk_limit_record " + "GROUP BY " + + "layer_limit.pk_layer) AS limit_names " + "ON layer.pk_layer = limit_names.pk_layer "; + + private static final String GET_LIMIT_NAMES = "SELECT " + "limit_record.str_name " + "FROM " + + "layer_limit, " + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? 
" + "AND " + + "limit_record.pk_limit_record = layer_limit.pk_limit_record "; + + private static final String GET_SHOW = "SELECT " + "show.pk_show," + "show.str_name," + + "show.b_paused," + "show.int_default_min_cores," + "show.int_default_max_cores," + + "show.int_default_min_gpus," + "show.int_default_max_gpus," + "show.b_booking_enabled," + + "show.b_dispatch_enabled," + "show.b_active," + "show.str_comment_email," + + "show_stats.int_frame_insert_count," + "show_stats.int_job_insert_count," + + "show_stats.int_frame_success_count," + "show_stats.int_frame_fail_count," + + "COALESCE(vs_show_stat.int_pending_count,0) AS int_pending_count," + + "COALESCE(vs_show_stat.int_running_count,0) AS int_running_count," + + "COALESCE(vs_show_stat.int_dead_count,0) AS int_dead_count," + + "COALESCE(vs_show_resource.int_cores,0) AS int_cores, " + + "COALESCE(vs_show_resource.int_gpus,0) AS int_gpus, " + + "COALESCE(vs_show_stat.int_job_count,0) AS int_job_count " + "FROM " + "show " + + "JOIN show_stats ON (show.pk_show = show_stats.pk_show) " + + "LEFT JOIN vs_show_stat ON (vs_show_stat.pk_show = show.pk_show) " + + "LEFT JOIN vs_show_resource ON (vs_show_resource.pk_show=show.pk_show) " + "WHERE " + + "1 = 1 "; + + private static final String GET_SERVICE = "SELECT " + "service.pk_service," + "service.str_name," + + "service.b_threadable," + "service.int_cores_min," + "service.int_cores_max," + + "service.int_mem_min," + "service.int_gpus_min," + "service.int_gpus_max," + + "service.int_gpu_mem_min," + "service.str_tags," + "service.int_timeout," + + "service.int_timeout_llu," + "service.int_min_memory_increase " + "FROM " + "service "; + + private static final String GET_SERVICE_OVERRIDE = "SELECT " + "show_service.pk_show_service," + + "show_service.str_name," + "show_service.b_threadable," + "show_service.int_cores_min," + + "show_service.int_cores_max," + "show_service.int_mem_min," + "show_service.int_gpus_min," + + "show_service.int_gpus_max," + "show_service.int_gpu_mem_min," + "show_service.str_tags," + + "show_service.int_timeout," + "show_service.int_timeout_llu," + + "show_service.int_min_memory_increase " + "FROM " + "show_service, " + "show " + "WHERE " + + "show_service.pk_show = show.pk_show "; + + private static final String GET_TASK = "SELECT " + "task.pk_task," + "task.str_shot," + + "task.int_min_cores + task.int_adjust_cores AS int_min_cores, " + "task.int_adjust_cores, " + + "task.int_min_gpus + task.int_adjust_gpus AS int_min_gpus, " + "task.int_adjust_gpus, " + + "dept.str_name AS str_dept " + "FROM " + "task," + "dept, " + "point " + "WHERE " + + "task.pk_point = point.pk_point " + "AND " + "point.pk_dept = dept.pk_dept "; + + private static final String GET_HOST = "SELECT " + "host.pk_host, " + + "host.str_name AS host_name," + "host_stat.str_state AS host_state," + "host.b_nimby," + + "host_stat.ts_booted," + "host_stat.ts_ping," + "host.int_cores," + "host.int_cores_idle," + + "host.int_mem," + "host.int_mem_idle," + "host.int_gpus," + "host.int_gpus_idle," + + "host.int_gpu_mem," + "host.int_gpu_mem_idle," + "host.str_tags," + "host.str_lock_state," + + "host.b_comment," + "host.int_thread_mode," + "host_stat.str_os," + + "host_stat.int_mem_total," + "host_stat.int_mem_free," + "host_stat.int_swap_total," + + "host_stat.int_swap_free," + "host_stat.int_mcp_total," + "host_stat.int_mcp_free," + + "host_stat.int_gpu_mem_total," + "host_stat.int_gpu_mem_free," + "host_stat.int_load, " + + "alloc.str_name AS alloc_name " + "FROM " + "alloc," + "facility, " + "host_stat," + 
"host " + + "WHERE " + "host.pk_alloc = alloc.pk_alloc " + "AND " + + "facility.pk_facility = alloc.pk_facility " + "AND " + "host.pk_host = host_stat.pk_host "; + + private static final String GET_DEPEND = "SELECT " + "depend.pk_depend, " + "depend.str_type, " + + "depend.b_active, " + "depend.b_any, " + "depend.str_target, " + + "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_on) AS depend_on_job, " + + "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_er) AS depend_er_job, " + + "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_on) AS depend_on_layer, " + + "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_er) AS depend_er_layer, " + + "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_on) AS depend_on_frame, " + + "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_er) AS depend_er_frame " + + "FROM " + "depend "; + + private static final String GET_SUBSCRIPTION = "SELECT " + "subscription.pk_subscription, " + + "(alloc.str_name || '.' || show.str_name) AS name, " + "subscription.int_burst, " + + "subscription.int_size, " + "subscription.int_cores, " + "subscription.int_gpus, " + + "show.str_name AS show_name, " + "alloc.str_name AS alloc_name, " + + "facility.str_name AS facility_name " + "FROM " + "show, " + "alloc, " + "facility," + + "subscription " + "WHERE " + "subscription.pk_show = show.pk_show " + "AND " + + "subscription.pk_alloc = alloc.pk_alloc " + "AND " + + "alloc.pk_facility = facility.pk_facility "; + + private static final String GET_PENDING_JOBS = GET_JOB + "AND " + "job.str_state = 'PENDING' "; + + private static final String GET_FRAMES_CRITERIA = + + "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + "frame.str_name," + + "frame.int_number," + "frame.int_dispatch_order," + "frame.ts_started," + + "frame.ts_stopped," + "frame.ts_llu," + "frame.int_retries," + "frame.str_state," + + "frame.str_host," + "frame.int_cores," + "frame.int_mem_max_used," + + "frame.int_mem_used, " + "frame.int_mem_reserved, " + "frame.int_gpus," + + "frame.int_gpu_mem_max_used, " + "frame.int_gpu_mem_used, " + + "frame.int_gpu_mem_reserved, " + "frame.str_checkpoint_state," + + "frame.int_checkpoint_count," + "frame.int_total_past_core_time," + + "frame.int_total_past_gpu_time," + "layer.str_name AS layer_name," + + "job.str_name AS job_name, " + "frame_state_display_overrides.*, " + + "ROW_NUMBER() OVER " + + "(ORDER BY frame.int_dispatch_order ASC, layer.int_dispatch_order ASC) AS row_number " + + "FROM " + "job, " + "layer," + "frame " + "LEFT JOIN frame_state_display_overrides ON " + + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job "; + + private static final String QUERY_FOR_DEED = "SELECT " + "host.str_name AS str_host," + + "show.str_name AS str_show," + "owner.str_username," + "deed.pk_deed " + "FROM " + "deed," + + "owner," + "host," + "show " + "WHERE " + "deed.pk_host = host.pk_host " + "AND " + + "deed.pk_owner = owner.pk_owner " + "AND " + "owner.pk_show = show.pk_show "; + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } + + public ProcSearchFactory getProcSearchFactory() { + return procSearchFactory; + } + 
+ public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { + this.procSearchFactory = procSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java b/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java index f3eec2c8c..1da4e4940 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.grpc.depend.DependTarget; @@ -26,61 +22,59 @@ */ public abstract class AbstractDepend { - private boolean launch = false; - private boolean active = true; - private boolean anyFrame = false; + private boolean launch = false; + private boolean active = true; + private boolean anyFrame = false; - /** - * True if the dependency is just a container for other depends and cannot - * be satisfied by frames completing. Its essentially a way to group related - * depends. - */ - private boolean composite = false; + /** + * True if the dependency is just a container for other depends and cannot be satisfied by frames + * completing. Its essentially a way to group related depends. 
+ */ + private boolean composite = false; - private String id = null; + private String id = null; - public String getId() { - return id; - } + public String getId() { + return id; + } - public boolean isActive() { - return active; - } + public boolean isActive() { + return active; + } - public boolean isAnyFrame() { - return anyFrame; - } + public boolean isAnyFrame() { + return anyFrame; + } - public void setAnyFrame(boolean anyFrame) { - this.anyFrame = anyFrame; - } + public void setAnyFrame(boolean anyFrame) { + this.anyFrame = anyFrame; + } - public void setActive(boolean active) { - this.active = active; - } + public void setActive(boolean active) { + this.active = active; + } - public void setId(String id) { - this.id = id; - } + public void setId(String id) { + this.id = id; + } - public boolean isLaunchDepend() { - return launch; - } + public boolean isLaunchDepend() { + return launch; + } - public void setLaunchDepend(boolean launch) { - this.launch = launch; - } + public void setLaunchDepend(boolean launch) { + this.launch = launch; + } - public boolean isComposite() { - return composite; - } + public boolean isComposite() { + return composite; + } - public void setComposite(boolean composite) { - this.composite = composite; - } + public void setComposite(boolean composite) { + this.composite = composite; + } - public abstract String getSignature(); + public abstract String getSignature(); - public abstract DependTarget getTarget(); + public abstract DependTarget getTarget(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java b/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java index 710d8f0a3..aaa907d11 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.depend; /** @@ -24,7 +20,6 @@ */ public interface Depend { - void accept(DependVisitor dependVisitor); + void accept(DependVisitor dependVisitor); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java b/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java index 6b0e3e8f1..66f0473b8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java @@ -2,91 +2,86 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.service.DependManager; public class DependCreationVisitor implements DependVisitor { - DependManager dependManager; - - public DependCreationVisitor(DependManager dependManager) { - this.dependManager = dependManager; - } - - @Override - public void accept(FrameOnFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(JobOnJob depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(JobOnLayer depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(JobOnFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnJob depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnLayer depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(FrameOnJob depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(FrameOnLayer depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(FrameByFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(PreviousFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnSimFrame depend) { - dependManager.createDepend(depend); - } + DependManager dependManager; + + public DependCreationVisitor(DependManager dependManager) { + this.dependManager = dependManager; + } + + @Override + public void accept(FrameOnFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(JobOnJob depend) { + dependManager.createDepend(depend); + } + + 
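// Illustrative usage sketch (editorial addition, not part of the original sources): a concrete
// depend is persisted through double dispatch -- Depend.accept() calls back into the matching
// overload of this visitor, which forwards to DependManager.createDepend(). The helper name and
// its parameters are hypothetical; LayerInterface is written fully qualified to avoid assuming an
// import in this file.
private static void createDependExample(DependManager dependManager,
    com.imageworks.spcue.LayerInterface erLayer, com.imageworks.spcue.LayerInterface onLayer) {
  // FrameByFrame (same package) rejects a layer depending on itself; accept() then routes to the
  // accept(FrameByFrame) overload of this class, which calls dependManager.createDepend(depend).
  Depend depend = new FrameByFrame(erLayer, onLayer);
  depend.accept(new DependCreationVisitor(dependManager));
}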
@Override + public void accept(JobOnLayer depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(JobOnFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnJob depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnLayer depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(FrameOnJob depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(FrameOnLayer depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(FrameByFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(PreviousFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnSimFrame depend) { + dependManager.createDepend(depend); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java b/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java index db244aa5a..a0956997b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class DependException extends SpcueRuntimeException { - public DependException() { - // TODO Auto-generated constructor stub - } + public DependException() { + // TODO Auto-generated constructor stub + } - public DependException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public DependException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public DependException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public DependException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public DependException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public DependException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java b/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java index 7b385a2fa..40928bea2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java @@ -2,39 +2,42 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.depend; public interface DependVisitor { - void accept(JobOnJob depend); - void accept(JobOnLayer depend); - void accept(JobOnFrame depend); + void accept(JobOnJob depend); - void accept(LayerOnJob depend); - void accept(LayerOnLayer depend); - void accept(LayerOnFrame depend); + void accept(JobOnLayer depend); - void accept(FrameOnJob depend); - void accept(FrameOnLayer depend); - void accept(FrameOnFrame depend); + void accept(JobOnFrame depend); - void accept(FrameByFrame depend); - void accept(PreviousFrame depend); - void accept(LayerOnSimFrame depend); -} + void accept(LayerOnJob depend); + + void accept(LayerOnLayer depend); + + void accept(LayerOnFrame depend); + + void accept(FrameOnJob depend); + void accept(FrameOnLayer depend); + + void accept(FrameOnFrame depend); + + void accept(FrameByFrame depend); + + void accept(PreviousFrame depend); + + void accept(LayerOnSimFrame depend); +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java index 2a9806d82..5d7c2cb57 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.LayerInterface; @@ -26,54 +22,51 @@ public class FrameByFrame extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final LayerInterface dependOnLayer; - - public FrameByFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - - if (dependErLayer.getLayerId().equals( - dependOnLayer.getLayerId())) { - throw new DependException("Cannot make the layer " + - dependErLayer.getName() + " depend on itself."); - } - - this.dependErLayer = dependErLayer; - this.dependOnLayer = dependOnLayer; - setComposite(true); - } + private final LayerInterface dependErLayer; + private final LayerInterface dependOnLayer; - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnLayer.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + public FrameByFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); + if (dependErLayer.getLayerId().equals(dependOnLayer.getLayerId())) { + throw new DependException( + "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + this.dependErLayer = dependErLayer; + this.dependOnLayer = dependOnLayer; + setComposite(true); + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnLayer.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java index fde19a29b..b64511c13 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.FrameInterface; @@ -26,63 +22,62 @@ public class FrameOnFrame extends AbstractDepend implements Depend { - private final FrameInterface dependErFrame; - private final FrameInterface dependOnFrame; - private AbstractDepend parent = null; - - public FrameOnFrame(FrameInterface dependErFrame, - FrameInterface dependOnFrame, AbstractDepend parent) { - - if (dependOnFrame.getFrameId().equals(dependErFrame.getFrameId())) { - throw new DependException("The frame " + dependErFrame.getName() + - " cannot depend on itself."); - } - - this.dependErFrame = dependErFrame; - this.dependOnFrame = dependOnFrame; - this.parent = parent; - } - public FrameOnFrame(FrameInterface dependErFrame, - FrameInterface dependOnFrame) { - this.dependErFrame = dependErFrame; - this.dependOnFrame = dependOnFrame; - } - public FrameInterface getDependErFrame() { - return dependErFrame; - } - - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + private final FrameInterface dependErFrame; + private final FrameInterface dependOnFrame; + private AbstractDepend parent = null; - public AbstractDepend getParent() { - return parent; - } + public FrameOnFrame(FrameInterface dependErFrame, FrameInterface dependOnFrame, + AbstractDepend parent) { - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); + if (dependOnFrame.getFrameId().equals(dependErFrame.getFrameId())) { + throw new DependException( + "The frame " + dependErFrame.getName() + " cannot depend on itself."); } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_ON_FRAME.toString()); - key.append(dependErFrame.getJobId()); - key.append(dependOnFrame.getJobId()); - key.append(dependErFrame.getFrameId()); - key.append(dependOnFrame.getFrameId()); - return SqlUtil.genKeyByName(key.toString()); - } - - @Override - public DependTarget getTarget() { - if (dependErFrame.getJobId().equals(dependOnFrame.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + this.dependErFrame = dependErFrame; + this.dependOnFrame = dependOnFrame; + this.parent = parent; + } + + public FrameOnFrame(FrameInterface dependErFrame, FrameInterface dependOnFrame) { + this.dependErFrame = dependErFrame; + this.dependOnFrame = dependOnFrame; + } + + public FrameInterface getDependErFrame() { + return dependErFrame; + } + + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } + + public AbstractDepend getParent() { + return parent; + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public String getSignature() 
{ + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_ON_FRAME.toString()); + key.append(dependErFrame.getJobId()); + key.append(dependOnFrame.getJobId()); + key.append(dependErFrame.getFrameId()); + key.append(dependOnFrame.getFrameId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public DependTarget getTarget() { + if (dependErFrame.getJobId().equals(dependOnFrame.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java index b69bb51f1..03866e182 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.FrameInterface; @@ -27,44 +23,43 @@ public class FrameOnJob extends AbstractDepend implements Depend { - private final FrameInterface dependErFrame; - private final JobInterface dependOnJob; - - public FrameOnJob(FrameInterface dependErFrame, JobInterface dependOnJob) { - - if (dependErFrame.getJobId().equals(dependOnJob.getJobId())) { - throw new DependException("A frame cannot depend on its own job."); - } + private final FrameInterface dependErFrame; + private final JobInterface dependOnJob; - this.dependErFrame = dependErFrame; - this.dependOnJob = dependOnJob; - } - - public FrameInterface getDependErFrame() { - return dependErFrame; - } + public FrameOnJob(FrameInterface dependErFrame, JobInterface dependOnJob) { - public JobInterface getDependOnJob() { - return dependOnJob; + if (dependErFrame.getJobId().equals(dependOnJob.getJobId())) { + throw new DependException("A frame cannot depend on its own job."); } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErFrame.getFrameId()); - key.append(dependOnJob.getJobId()); - return SqlUtil.genKeyByName(key.toString()); - } - - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } - - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + this.dependErFrame = dependErFrame; + this.dependOnJob = dependOnJob; + } + + public FrameInterface getDependErFrame() { + return dependErFrame; + } + + public JobInterface getDependOnJob() { + return dependOnJob; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErFrame.getFrameId()); + key.append(dependOnJob.getJobId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java index f57deb5d3..a8b001d57 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.FrameInterface; @@ -27,51 +23,48 @@ public class FrameOnLayer extends AbstractDepend implements Depend { - private final FrameInterface dependErFrame; - private final LayerInterface dependOnLayer; - - public FrameOnLayer(FrameInterface dependErFrame, LayerInterface dependOnLayer) { - - if (dependErFrame.getLayerId().equals(dependOnLayer.getLayerId())) { - throw new DependException("The frame " + - dependErFrame.getName() + " cannot depend " + - " on its own layer."); - } - - this.dependErFrame = dependErFrame; - this.dependOnLayer = dependOnLayer; - } - - public FrameInterface getDependErFrame() { - return dependErFrame; - } - - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + private final FrameInterface dependErFrame; + private final LayerInterface dependOnLayer; - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErFrame.getFrameId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + public FrameOnLayer(FrameInterface dependErFrame, LayerInterface dependOnLayer) { - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); + if (dependErFrame.getLayerId().equals(dependOnLayer.getLayerId())) { + throw new DependException( + "The frame " + dependErFrame.getName() + " cannot depend " + " on its own layer."); } - @Override - public DependTarget getTarget() { - if (dependErFrame.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + this.dependErFrame = dependErFrame; + this.dependOnLayer = dependOnLayer; + } + + public FrameInterface getDependErFrame() { + return dependErFrame; + } + + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErFrame.getFrameId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + if (dependErFrame.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java index 1fee300b1..464428842 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.FrameInterface; @@ -27,45 +23,43 @@ public class JobOnFrame extends AbstractDepend implements Depend { - private final JobInterface dependErJob; - private final FrameInterface dependOnFrame; - - public JobOnFrame(JobInterface dependErJob, FrameInterface dependOnFrame) { - - if (dependErJob.getJobId().equals(dependOnFrame.getJobId())) { - throw new DependException( - "A job cannot depend on one of its own frames."); - } + private final JobInterface dependErJob; + private final FrameInterface dependOnFrame; - this.dependErJob = dependErJob; - this.dependOnFrame = dependOnFrame; - } - - public JobInterface getDependErJob() { - return dependErJob; - } + public JobOnFrame(JobInterface dependErJob, FrameInterface dependOnFrame) { - public FrameInterface getDependOnFrame() { - return dependOnFrame; + if (dependErJob.getJobId().equals(dependOnFrame.getJobId())) { + throw new DependException("A job cannot depend on one of its own frames."); } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErJob.getJobId()); - key.append(dependOnFrame.getFrameId()); - return SqlUtil.genKeyByName(key.toString()); - } - - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } - - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + this.dependErJob = dependErJob; + this.dependOnFrame = dependOnFrame; + } + + public JobInterface getDependErJob() { + return dependErJob; + } + + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErJob.getJobId()); + key.append(dependOnFrame.getFrameId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java index dfa975588..bd1922283 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.JobInterface; @@ -26,44 +22,43 @@ public class JobOnJob extends AbstractDepend implements Depend { - private final JobInterface dependErJob; - private final JobInterface dependOnJob; - - public JobOnJob(JobInterface dependErJob, JobInterface dependOnJob) { - - if (dependErJob.getJobId().equals(dependOnJob.getJobId())) { - throw new DependException("A job cannot depend on itself."); - } + private final JobInterface dependErJob; + private final JobInterface dependOnJob; - this.dependErJob = dependErJob; - this.dependOnJob = dependOnJob; - } - - public JobInterface getDependErJob() { - return dependErJob; - } + public JobOnJob(JobInterface dependErJob, JobInterface dependOnJob) { - public JobInterface getDependOnJob() { - return dependOnJob; + if (dependErJob.getJobId().equals(dependOnJob.getJobId())) { + throw new DependException("A job cannot depend on itself."); } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.JOB_ON_JOB.toString()); - key.append(dependErJob.getJobId()); - key.append(dependOnJob.getJobId()); - return SqlUtil.genKeyByName(key.toString()); - } - - @Override - public void accept(DependVisitor dependCreator) { - dependCreator.accept(this); - } - - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + this.dependErJob = dependErJob; + this.dependOnJob = dependOnJob; + } + + public JobInterface getDependErJob() { + return dependErJob; + } + + public JobInterface getDependOnJob() { + return dependOnJob; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.JOB_ON_JOB.toString()); + key.append(dependErJob.getJobId()); + key.append(dependOnJob.getJobId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependCreator) { + dependCreator.accept(this); + } + + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java index 5f51f04c1..588f90aca 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.JobInterface; @@ -27,43 +23,43 @@ public class JobOnLayer extends AbstractDepend implements Depend { - private final JobInterface dependErJob; - private final LayerInterface dependOnLayer; - - public JobOnLayer(JobInterface dependErJob, LayerInterface dependOnLayer) { + private final JobInterface dependErJob; + private final LayerInterface dependOnLayer; - if (dependErJob.getJobId().equals(dependOnLayer.getJobId())) { - throw new DependException( - "A job cannot depend on one of its own layers."); - } + public JobOnLayer(JobInterface dependErJob, LayerInterface dependOnLayer) { - this.dependErJob = dependErJob; - this.dependOnLayer = dependOnLayer; - } - public JobInterface getDependErJob() { - return dependErJob; + if (dependErJob.getJobId().equals(dependOnLayer.getJobId())) { + throw new DependException("A job cannot depend on one of its own layers."); } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + this.dependErJob = dependErJob; + this.dependOnLayer = dependOnLayer; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.JOB_ON_JOB.toString()); - key.append(dependErJob.getJobId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + public JobInterface getDependErJob() { + return dependErJob; + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } -} + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.JOB_ON_JOB.toString()); + key.append(dependErJob.getJobId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java index 95c6bb710..b98de5618 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.FrameInterface; @@ -27,49 +23,47 @@ public class LayerOnFrame extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final FrameInterface dependOnFrame; - - public LayerOnFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { - - if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { - throw new DependException("A layer cannot depend on one of its own frames."); - } - - this.dependErLayer = dependErLayer; - this.dependOnFrame = dependOnFrame; - } - - public LayerInterface getDependErLayer() { - return dependErLayer; - } - - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + private final LayerInterface dependErLayer; + private final FrameInterface dependOnFrame; - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.JOB_ON_JOB.toString()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnFrame.getFrameId()); - return SqlUtil.genKeyByName(key.toString()); - } + public LayerOnFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); + if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { + throw new DependException("A layer cannot depend on one of its own frames."); } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + this.dependErLayer = dependErLayer; + this.dependOnFrame = dependOnFrame; + } + + public LayerInterface getDependErLayer() { + return dependErLayer; + } + + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.JOB_ON_JOB.toString()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnFrame.getFrameId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } } - diff --git 
a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java index 67cc26fd7..721aed4f4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.JobInterface; @@ -27,44 +23,43 @@ public class LayerOnJob extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final JobInterface dependOnJob; - - public LayerOnJob(LayerInterface dependErLayer, JobInterface dependOnJob) { - - if (dependErLayer.getJobId().equals(dependOnJob.getJobId())) { - throw new DependException("A layer cannot depend on its own job."); - } + private final LayerInterface dependErLayer; + private final JobInterface dependOnJob; - this.dependErLayer = dependErLayer; - this.dependOnJob = dependOnJob; - } - - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerOnJob(LayerInterface dependErLayer, JobInterface dependOnJob) { - public JobInterface getDependOnJob() { - return dependOnJob; + if (dependErLayer.getJobId().equals(dependOnJob.getJobId())) { + throw new DependException("A layer cannot depend on its own job."); } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnJob.getJobId()); - return SqlUtil.genKeyByName(key.toString()); - } - - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } - - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + this.dependErLayer = dependErLayer; + this.dependOnJob = dependOnJob; + } + + public LayerInterface getDependErLayer() { + return dependErLayer; + } + + public JobInterface getDependOnJob() { + return dependOnJob; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnJob.getJobId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override 
+ public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java index 33353b737..d4a61a5e8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.LayerInterface; @@ -26,53 +22,50 @@ public class LayerOnLayer extends AbstractDepend implements Depend { - public final LayerInterface dependErLayer; - public final LayerInterface dependOnLayer; - - public LayerOnLayer(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - - if (dependErLayer.getLayerId().equals( - dependOnLayer.getLayerId())) { - throw new DependException("Cannot make the layer " + - dependErLayer.getName() + " depend on itself."); - } - - this.dependErLayer = dependErLayer; - this.dependOnLayer = dependOnLayer; - } - - public LayerInterface getDependErLayer() { - return dependErLayer; - } - - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public final LayerInterface dependErLayer; + public final LayerInterface dependOnLayer; - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.LAYER_ON_LAYER.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnLayer.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + public LayerOnLayer(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); + if (dependErLayer.getLayerId().equals(dependOnLayer.getLayerId())) { + throw new DependException( + "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + this.dependErLayer = dependErLayer; + this.dependOnLayer = dependOnLayer; + } + + public LayerInterface getDependErLayer() { + return dependErLayer; + } + + public LayerInterface getDependOnLayer() { 
+ return dependOnLayer; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.LAYER_ON_LAYER.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnLayer.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } + + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java index 013410164..8af36ba95 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.FrameInterface; @@ -27,55 +23,51 @@ public class LayerOnSimFrame extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final FrameInterface dependOnFrame; - - public LayerOnSimFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { - - if (dependErLayer.getLayerId().equals( - dependOnFrame.getLayerId())) { - throw new DependException("Cannot make the layer " + - dependErLayer.getName() + " depend on itself."); - } - - this.dependErLayer = dependErLayer; - this.dependOnFrame = dependOnFrame; - setComposite(true); - } - - - public LayerInterface getDependErLayer() { - return dependErLayer; - } + private final LayerInterface dependErLayer; + private final FrameInterface dependOnFrame; - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + public LayerOnSimFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.LAYER_ON_SIM_FRAME.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnFrame.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnFrame.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); + if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { + throw new DependException( + "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + this.dependErLayer = dependErLayer; + this.dependOnFrame = dependOnFrame; + setComposite(true); + } + + public LayerInterface getDependErLayer() { + return dependErLayer; + } + + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } + + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.LAYER_ON_SIM_FRAME.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnFrame.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnFrame.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } + + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java index b38e7c474..c58a824b0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; import com.imageworks.spcue.LayerInterface; @@ -26,47 +22,45 @@ public class PreviousFrame extends AbstractDepend implements Depend { - public final LayerInterface dependErLayer; - public final LayerInterface dependOnLayer; + public final LayerInterface dependErLayer; + public final LayerInterface dependOnLayer; - public PreviousFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - this.dependErLayer = dependErLayer; - this.dependOnLayer = dependOnLayer; - setComposite(true); - } + public PreviousFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { + this.dependErLayer = dependErLayer; + this.dependOnLayer = dependOnLayer; + setComposite(true); + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.PREVIOUS_FRAME.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnLayer.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.PREVIOUS_FRAME.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnLayer.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } - else { - return DependTarget.EXTERNAL; - } + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; } + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java b/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java index b353aa4a6..4bc5d7fa3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java @@ -2,36 +2,31 @@ /* * Copyright Contributors to the OpenCue Project * - * 
Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.depend; public class QueueDependOperation implements Runnable { - private DependVisitor visitor; - private Depend depend; + private DependVisitor visitor; + private Depend depend; - public QueueDependOperation(Depend depend, DependVisitor visitor) { - this.depend = depend; - this.visitor = visitor; - } + public QueueDependOperation(Depend depend, DependVisitor visitor) { + this.depend = depend; + this.visitor = visitor; + } - @Override - public void run() { - depend.accept(visitor); - } + @Override + public void run() { + depend.accept(visitor); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java index 687a14ce5..d6dce119e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import org.apache.logging.log4j.Logger; @@ -29,224 +25,197 @@ import com.imageworks.spcue.util.CueUtil; /** - * A class to build dispatchers on that contains core dispatching methods that - * should be the same for all dispatchers. 
+ * A class to build dispatchers on that contains core dispatching methods that should be the same + * for all dispatchers. */ public abstract class AbstractDispatcher { - private static final Logger logger = LogManager.getLogger(AbstractDispatcher.class); - - public DispatchSupport dispatchSupport; - public RqdClient rqdClient; - - public boolean testMode = false; - - public boolean dispatchProc(DispatchFrame frame, VirtualProc proc) { - - try { - dispatch(frame, proc); - dispatchSummary(proc, frame, "Dispatch"); - DispatchSupport.dispatchedProcs.getAndIncrement(); - - return true; - - } catch (FrameReservationException fre) { - /* - * This usually just means another thread got the frame first, so - * just retry on the next frame. - */ - DispatchSupport.bookingRetries.incrementAndGet(); - String msg = - "frame reservation error, " - + "dispatchProcToJob failed to book next frame, " - + fre; - logger.info(msg); - } catch (ResourceDuplicationFailureException rrfe) { - /* - * There is a resource already assigned to the frame we reserved! - * Don't clear the frame, let it keep running and continue to the - * next frame. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.fixFrame(frame); - - String msg = - "proc update error, dispatchProcToJob failed " - + "to assign proc to job " + frame + ", " + proc - + " already assigned to another frame." + rrfe; - - logger.info(msg); - } catch (ResourceReservationFailureException rrfe) { - /* - * This should technically never happen since the proc is already - * allocated at this point, but, if it does it should be unbooked. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = - "proc update error, " - + "dispatchProcToJob failed to assign proc to job " - + frame + ", " + rrfe; - logger.info(msg); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - throw new DispatcherException("proc reservation error, " - + "unable to allocate proc " + proc + "that " - + "was already allocated."); - } catch (Exception e) { - /* - * Everything else means that the host/frame record was updated but - * another error occurred and the proc should be cleared. It could - * also be running, so use the jobManagerSupprot to kill it just in - * case. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = - "dispatchProcToJob failed booking proc " + proc - + " on job " + frame; - logger.info(msg); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - try { - rqdClient.killFrame(proc, "An accounting error occured " - + "when booking this frame."); - } catch (RqdClientException rqde) { - /* - * Its almost expected that this will fail, as this is just a - * precaution if the frame did actually launch. - */ - } - throw new DispatcherException("proc reservation error, " - + "unable to communicate with proc " + proc); - } - - return false; - } - - public boolean dispatchHost(DispatchFrame frame, VirtualProc proc) { - try { - dispatch(frame, proc); - dispatchSummary(proc, frame, "Booking"); - DispatchSupport.bookedProcs.getAndIncrement(); - DispatchSupport.bookedCores.addAndGet(proc.coresReserved); - DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); - return true; - } catch (FrameReservationException fre) { - /* - * This usually just means another thread got the frame first, so - * just retry on the next frame. 
- */ - DispatchSupport.bookingRetries.incrementAndGet(); - logger.info("frame reservation error, " - + "dispatchHostToJob failed to book new frame: " + fre); - } catch (ResourceDuplicationFailureException rrfe) { - /* - * There is a resource already assigned to the frame we reserved! - * Don't clear the frame, let it keep running and continue to the - * next frame. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.fixFrame(frame); - - String msg = - "proc update error, dispatchProcToJob failed " - + "to assign proc to job " + frame + ", " + proc - + " already assigned to another frame." + rrfe; - - logger.info(msg); - } catch (ResourceReservationFailureException rrfe) { - /* - * This generally means that the resources we're booked by another - * thread. We can be fairly certain another thread is working with - * the current host, so bail out. Also note here the proc was never - * committed so there is not point to clearing or unbooking it. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.clearFrame(frame); - - /* Throw an exception to stop booking * */ - throw new DispatcherException("host reservation error, " - + "dispatchHostToJob failed to allocate a new proc " + rrfe); - } catch (Exception e) { - /* - * Any other exception means that the frame/host records have been - * updated, so, we need to clear the proc. Its possible the frame is - * actually running, so try to kill it. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - try { - rqdClient.killFrame(proc, "An accounting error occured " - + "when booking this frame."); - } catch (RqdClientException rqde) { - /* - * Its almost expected that this will fail, as this is just a - * precaution if the frame did actually launch. - */ - } - /* Thrown an exception to stop booking */ - throw new DispatcherException("stopped dispatching host " + proc - + ", " + e); - } - - return false; - } - - public void dispatch(DispatchFrame frame, VirtualProc proc) { + private static final Logger logger = LogManager.getLogger(AbstractDispatcher.class); + + public DispatchSupport dispatchSupport; + public RqdClient rqdClient; + + public boolean testMode = false; + + public boolean dispatchProc(DispatchFrame frame, VirtualProc proc) { + + try { + dispatch(frame, proc); + dispatchSummary(proc, frame, "Dispatch"); + DispatchSupport.dispatchedProcs.getAndIncrement(); + + return true; + + } catch (FrameReservationException fre) { + /* + * This usually just means another thread got the frame first, so just retry on the next + * frame. + */ + DispatchSupport.bookingRetries.incrementAndGet(); + String msg = + "frame reservation error, " + "dispatchProcToJob failed to book next frame, " + fre; + logger.info(msg); + } catch (ResourceDuplicationFailureException rrfe) { + /* + * There is a resource already assigned to the frame we reserved! Don't clear the frame, let + * it keep running and continue to the next frame. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.fixFrame(frame); + + String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + frame + + ", " + proc + " already assigned to another frame." + rrfe; + + logger.info(msg); + } catch (ResourceReservationFailureException rrfe) { + /* + * This should technically never happen since the proc is already allocated at this point, + * but, if it does it should be unbooked. 
+ */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "proc update error, " + "dispatchProcToJob failed to assign proc to job " + frame + + ", " + rrfe; + logger.info(msg); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + throw new DispatcherException("proc reservation error, " + "unable to allocate proc " + proc + + " that " + "was already allocated."); + } catch (Exception e) { + /* + * Everything else means that the host/frame record was updated but another error occurred and + * the proc should be cleared. It could also be running, so use the jobManagerSupport to kill + * it just in case. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "dispatchProcToJob failed booking proc " + proc + " on job " + frame; + logger.info(msg); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + try { + rqdClient.killFrame(proc, "An accounting error occurred " + "when booking this frame."); + } catch (RqdClientException rqde) { /* - * Start frame and create proc on the database. + * It's almost expected that this will fail, as this is just a precaution if the frame did + * actually launch. */ - dispatchSupport.startFrameAndProc(proc, frame); + } + throw new DispatcherException( + "proc reservation error, " + "unable to communicate with proc " + proc); + } + return false; + } + + public boolean dispatchHost(DispatchFrame frame, VirtualProc proc) { + try { + dispatch(frame, proc); + dispatchSummary(proc, frame, "Booking"); + DispatchSupport.bookedProcs.getAndIncrement(); + DispatchSupport.bookedCores.addAndGet(proc.coresReserved); + DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); + return true; + } catch (FrameReservationException fre) { + /* + * This usually just means another thread got the frame first, so just retry on the next + * frame. + */ + DispatchSupport.bookingRetries.incrementAndGet(); + logger + .info("frame reservation error, " + "dispatchHostToJob failed to book new frame: " + fre); + } catch (ResourceDuplicationFailureException rrfe) { + /* + * There is a resource already assigned to the frame we reserved! Don't clear the frame, let + * it keep running and continue to the next frame. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.fixFrame(frame); + + String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + frame + + ", " + proc + " already assigned to another frame." + rrfe; + + logger.info(msg); + } catch (ResourceReservationFailureException rrfe) { + /* + * This generally means that the resources were booked by another thread. We can be fairly + * certain another thread is working with the current host, so bail out. Also note here the + * proc was never committed so there is no point in clearing or unbooking it. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.clearFrame(frame); + + /* Throw an exception to stop booking * */ + throw new DispatcherException( + "host reservation error, " + "dispatchHostToJob failed to allocate a new proc " + rrfe); + } catch (Exception e) { + /* + * Any other exception means that the frame/host records have been updated, so, we need to + * clear the proc. It's possible the frame is actually running, so try to kill it.
+ */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + try { + rqdClient.killFrame(proc, "An accounting error occured " + "when booking this frame."); + } catch (RqdClientException rqde) { /* - * Communicate with RQD to run the frame. + * Its almost expected that this will fail, as this is just a precaution if the frame did + * actually launch. */ - if (!testMode) { - dispatchSupport.runFrame(proc, frame); - } - + } + /* Thrown an exception to stop booking */ + throw new DispatcherException("stopped dispatching host " + proc + ", " + e); } - private static void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { - String msg = type + " summary: " + - p.coresReserved + - " cores / " + - CueUtil.KbToMb(p.memoryReserved) + - " memory / " + - p.gpusReserved + - " gpus / " + - CueUtil.KbToMb(p.gpuMemoryReserved) + - " gpu memory " + - p.getName() + - " to " + f.show + "/" + f.shot; - logger.info(msg); - } + return false; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public void dispatch(DispatchFrame frame, VirtualProc proc) { + /* + * Start frame and create proc on the database. + */ + dispatchSupport.startFrameAndProc(proc, frame); - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; + /* + * Communicate with RQD to run the frame. + */ + if (!testMode) { + dispatchSupport.runFrame(proc, frame); } - public RqdClient getRqdClient() { - return rqdClient; - } + } - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } + private static void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { + String msg = + type + " summary: " + p.coresReserved + " cores / " + CueUtil.KbToMb(p.memoryReserved) + + " memory / " + p.gpusReserved + " gpus / " + CueUtil.KbToMb(p.gpuMemoryReserved) + + " gpu memory " + p.getName() + " to " + f.show + "/" + f.shot; + logger.info(msg); + } - public boolean isTestMode() { - return testMode; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setTestMode(boolean testMode) { - this.testMode = testMode; - } -} + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public boolean isTestMode() { + return testMode; + } + + public void setTestMode(boolean testMode) { + this.testMode = testMode; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java index 6f950e327..17eae95ab 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
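For illustration, a minimal sketch (not part of this patch) of how a concrete dispatcher might build on the AbstractDispatcher class whose diff ends above, assuming it has no further abstract methods to implement; the SimpleListDispatcher name and its caller-supplied frame list are hypothetical. dispatchHost() returns true when the frame was booked, false on a recoverable error, and throws DispatcherException when booking on this host should stop.

package com.imageworks.spcue.dispatcher;

import java.util.ArrayList;
import java.util.List;

import com.imageworks.spcue.DispatchFrame;
import com.imageworks.spcue.DispatchHost;
import com.imageworks.spcue.VirtualProc;

public class SimpleListDispatcher extends AbstractDispatcher {

  /** Book as many of the given frames as possible onto the host. */
  public List<VirtualProc> book(DispatchHost host, List<DispatchFrame> frames) {
    List<VirtualProc> procs = new ArrayList<VirtualProc>();
    for (DispatchFrame frame : frames) {
      // No selfish services in this sketch, hence the empty array.
      VirtualProc proc = VirtualProc.build(host, frame, new String[] {});
      try {
        if (dispatchHost(frame, proc)) {
          procs.add(proc); // frame started and proc created, keep booking
        }
        // false: a recoverable error (e.g. another thread won the frame), try the next one
      } catch (DispatcherException e) {
        break; // fatal booking error for this host, stop here
      }
    }
    return procs;
  }
}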
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dispatcher; @@ -28,94 +26,88 @@ public class BookingQueue implements QueueHealthCheck { - private final int healthThreshold; - private final int minUnhealthyPeriodMin; - private final int queueCapacity; - private final int corePoolSize; - private final int maxPoolSize; - // Base value for calculating the job sleep time - // this is used to slow down the booking queue to avoid racing conditions - private static final int BASE_SLEEP_TIME_MILLIS = 300; - - private static final Logger logger = LogManager.getLogger("HEALTH"); - private HealthyThreadPool healthyThreadPool; - - public BookingQueue(int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, - int corePoolSize, int maxPoolSize) { - this.healthThreshold = healthThreshold; - this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; - this.queueCapacity = queueCapacity; - this.corePoolSize = corePoolSize; - this.maxPoolSize = maxPoolSize; + private final int healthThreshold; + private final int minUnhealthyPeriodMin; + private final int queueCapacity; + private final int corePoolSize; + private final int maxPoolSize; + // Base value for calculating the job sleep time + // this is used to slow down the booking queue to avoid racing conditions + private static final int BASE_SLEEP_TIME_MILLIS = 300; + + private static final Logger logger = LogManager.getLogger("HEALTH"); + private HealthyThreadPool healthyThreadPool; + + public BookingQueue(int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, + int corePoolSize, int maxPoolSize) { + this.healthThreshold = healthThreshold; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.queueCapacity = queueCapacity; + this.corePoolSize = corePoolSize; + this.maxPoolSize = maxPoolSize; + initThreadPool(); + } + + public void initThreadPool() { + healthyThreadPool = new HealthyThreadPool("BookingQueue", healthThreshold, + minUnhealthyPeriodMin, queueCapacity, corePoolSize, maxPoolSize, BASE_SLEEP_TIME_MILLIS); + } + + public void shutdownUnhealthy() { + try { + if (!healthyThreadPool.shutdownUnhealthy()) { + logger.warn("BookingQueue: Unhealthy queue terminated, starting a new one"); initThreadPool(); + } + } catch (InterruptedException e) { + // TODO: evaluate crashing the whole springbook context here + // to force a container restart cycle + logger.error("Failed to restart BookingThreadPool", e); } + } - public void initThreadPool() { - healthyThreadPool = new HealthyThreadPool( - "BookingQueue", - healthThreshold, - minUnhealthyPeriodMin, - queueCapacity, - corePoolSize, - maxPoolSize, - BASE_SLEEP_TIME_MILLIS); - } + public boolean isHealthy() { + return healthyThreadPool.healthCheck(); + } - public void shutdownUnhealthy() { - try { - if 
(!healthyThreadPool.shutdownUnhealthy()) { - logger.warn("BookingQueue: Unhealthy queue terminated, starting a new one"); - initThreadPool(); - } - } catch (InterruptedException e) { - // TODO: evaluate crashing the whole springbook context here - // to force a container restart cycle - logger.error("Failed to restart BookingThreadPool", e); - } - } + public void execute(KeyRunnable r) { + healthyThreadPool.execute(r); + } - public boolean isHealthy() { - return healthyThreadPool.healthCheck(); - } + public long getRejectedTaskCount() { + return healthyThreadPool.getRejectedTaskCount(); + } - public void execute(KeyRunnable r) { - healthyThreadPool.execute(r); - } + public int getQueueCapacity() { + return queueCapacity; + } - public long getRejectedTaskCount() { - return healthyThreadPool.getRejectedTaskCount(); - } + public void shutdown() { + healthyThreadPool.shutdown(); + } - public int getQueueCapacity() { - return queueCapacity; - } + public int getSize() { + return healthyThreadPool.getQueue().size(); + } - public void shutdown() { - healthyThreadPool.shutdown(); - } + public int getRemainingCapacity() { + return healthyThreadPool.getQueue().remainingCapacity(); + } - public int getSize() { - return healthyThreadPool.getQueue().size(); - } + public int getActiveCount() { + return healthyThreadPool.getActiveCount(); + } - public int getRemainingCapacity() { - return healthyThreadPool.getQueue().remainingCapacity(); - } - - public int getActiveCount() { - return healthyThreadPool.getActiveCount(); - } + public long getCompletedTaskCount() { + return healthyThreadPool.getCompletedTaskCount(); + } - public long getCompletedTaskCount() { - return healthyThreadPool.getCompletedTaskCount(); - } + public long getCorePoolSize() { + return corePoolSize; + } - public long getCorePoolSize() { - return corePoolSize; - } - - public long getMaximumPoolSize() { - return maxPoolSize; - } + public long getMaximumPoolSize() { + return maxPoolSize; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java index b8abe83e0..bb57d45f0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
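For illustration, a hedged sketch (not part of this patch) of how the BookingQueue above could be driven by a periodic health check; the scheduler wiring and the constructor values are assumptions, only the constructor and shutdownUnhealthy() shown in the diff are used.

package com.imageworks.spcue.dispatcher;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class BookingQueueMonitorSketch {
  public static void main(String[] args) {
    // Arguments: healthThreshold, minUnhealthyPeriodMin, queueCapacity, corePoolSize, maxPoolSize.
    BookingQueue bookingQueue = new BookingQueue(10, 3, 1000, 6, 6);

    // Periodically replace the underlying thread pool if it has been unhealthy for too long.
    ScheduledExecutorService monitor = Executors.newSingleThreadScheduledExecutor();
    monitor.scheduleAtFixedRate(bookingQueue::shutdownUnhealthy, 1, 1, TimeUnit.MINUTES);
  }
}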
*/ - - package com.imageworks.spcue.dispatcher; import java.util.ArrayList; @@ -50,513 +46,458 @@ /** * The Core Unit Dispatcher. * - * The dispatching pipeline is a 3 stage process. Steps - * 1 and 2 are separate DB transactions. + * The dispatching pipeline is a 3 stage process. Steps 1 and 2 are separate DB transactions. * - * 1. Attempt to start the frame by updating to the - * Running state. If another thread gets there first, - * a FrameReservationException is thrown. + * 1. Attempt to start the frame by updating to the Running state. If another thread gets there + * first, a FrameReservationException is thrown. * - * 2. Reserve processor resources and update resource counts - * on the host, subscription, job, layer, group, and shot. + * 2. Reserve processor resources and update resource counts on the host, subscription, job, layer, + * group, and shot. * * 3. Contact RQD and launch the frame. * * Error Handling * - * Depending on where the error happens and what the error is, - * you might have to take the necessary steps to undo the dispatch - * because the transaction is not held open the entire time. + * Depending on where the error happens and what the error is, you might have to take the necessary + * steps to undo the dispatch because the transaction is not held open the entire time. * * FrameReservationException - You don't have to undo anything. * - * ResourceDuplicationFailureException - If there is ever a bug that - * causes the dispatcher to try and dispatch a frame with a proc - * already assigned to it, catching this and continuing to the next - * frame will ensure your dispatcher doesn't get stuck trying to - * launch just one frame. You can also try to fix the frame by - * running dispatchSupport.fixFrame(). + * ResourceDuplicationFailureException - If there is ever a bug that causes the dispatcher to try + * and dispatch a frame with a proc already assigned to it, catching this and continuing to the next + * frame will ensure your dispatcher doesn't get stuck trying to launch just one frame. You can also + * try to fix the frame by running dispatchSupport.fixFrame(). * - * ResourceReservationFailureException - This means the host didn't have - * the resources the dispatcher expected it to have. In this case - * you have to dispatchSupport.clearFrame(frame) to set the frame back - * to the Waiting state. + * ResourceReservationFailureException - This means the host didn't have the resources the + * dispatcher expected it to have. In this case you have to dispatchSupport.clearFrame(frame) to set + * the frame back to the Waiting state. * - * For all other exceptions, both the frame and the proc have to be - * manually removed. + * For all other exceptions, both the frame and the proc have to be manually removed. */ public class CoreUnitDispatcher implements Dispatcher { - private static final Logger logger = - LogManager.getLogger(CoreUnitDispatcher.class); - - private DispatchSupport dispatchSupport; - - private JobManager jobManager; - - private RqdClient rqdClient; - - private HostManager hostManager; - - public boolean testMode = false; - - private final long MEM_RESERVED_MIN; - private final long MEM_GPU_RESERVED_DEFAULT; - private final long MEM_GPU_RESERVED_MIN; - - private Environment env; - - /* - * Keeps a map of unique job IDs that should be skipped - * over for booking until the record has expired. 
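For illustration, the error-handling contract spelled out in the CoreUnitDispatcher class comment above, condensed into a short sketch; it assumes it lives inside CoreUnitDispatcher (so dispatch() and dispatchSupport are in scope) and is not additional patch content.

  // Illustrative only: per-frame handling as described by the class comment.
  void dispatchOneFrame(DispatchFrame frame, VirtualProc proc) {
    try {
      dispatch(frame, proc); // stages 1-3: start the frame, reserve resources, contact RQD
    } catch (FrameReservationException e) {
      // Another thread started the frame first; nothing to undo, move on to the next frame.
    } catch (ResourceDuplicationFailureException e) {
      dispatchSupport.fixFrame(frame); // the frame already had a proc; repair it and continue
    } catch (ResourceReservationFailureException e) {
      dispatchSupport.clearFrame(frame); // host lacked the expected resources; frame back to Waiting
    } catch (Exception e) {
      dispatchSupport.unbookProc(proc); // anything else: remove the proc...
      dispatchSupport.clearFrame(frame); // ...and reset the frame manually
    }
  }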
- */ - private Cache jobLock; - - @Autowired - public CoreUnitDispatcher(Environment env) { - this.env = env; - MEM_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_reserved_min"); - MEM_GPU_RESERVED_DEFAULT = getLongProperty("dispatcher.memory.mem_gpu_reserved_default"); - MEM_GPU_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_gpu_reserved_min"); + private static final Logger logger = LogManager.getLogger(CoreUnitDispatcher.class); + + private DispatchSupport dispatchSupport; + + private JobManager jobManager; + + private RqdClient rqdClient; + + private HostManager hostManager; + + public boolean testMode = false; + + private final long MEM_RESERVED_MIN; + private final long MEM_GPU_RESERVED_DEFAULT; + private final long MEM_GPU_RESERVED_MIN; + + private Environment env; + + /* + * Keeps a map of unique job IDs that should be skipped over for booking until the record has + * expired. + */ + private Cache jobLock; + + @Autowired + public CoreUnitDispatcher(Environment env) { + this.env = env; + MEM_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_reserved_min"); + MEM_GPU_RESERVED_DEFAULT = getLongProperty("dispatcher.memory.mem_gpu_reserved_default"); + MEM_GPU_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_gpu_reserved_min"); + } + + /* + * Return an integer value from the opencue.properties given a key + */ + private int getIntProperty(String property) { + return env.getRequiredProperty(property, Integer.class); + } + + /* + * Return an integer value from the opencue.properties given a key + */ + private long getLongProperty(String property) { + return env.getRequiredProperty(property, Long.class); + } + + private Cache getOrCreateJobLock() { + if (jobLock == null) { + this.jobLock = CacheBuilder.newBuilder() + .concurrencyLevel(getIntProperty("dispatcher.job_lock_concurrency_level")) + .expireAfterWrite(getIntProperty("dispatcher.job_lock_expire_seconds"), TimeUnit.SECONDS) + .build(); } + return jobLock; + } - /* - * Return an integer value from the opencue.properties given a key - */ - private int getIntProperty(String property) { - return env.getRequiredProperty(property, Integer.class); - } + private List dispatchJobs(DispatchHost host, Set jobs) { + List procs = new ArrayList(); - /* - * Return an integer value from the opencue.properties given a key - */ - private long getLongProperty(String property) { - return env.getRequiredProperty(property, Long.class); - } + try { + for (String jobid : jobs) { - private Cache getOrCreateJobLock() { - if (jobLock == null) { - this.jobLock = CacheBuilder.newBuilder() - .concurrencyLevel(getIntProperty("dispatcher.job_lock_concurrency_level")) - .expireAfterWrite(getIntProperty("dispatcher.job_lock_expire_seconds"), - TimeUnit.SECONDS) - .build(); + if (!host.hasAdditionalResources(CORE_POINTS_RESERVED_MIN, MEM_RESERVED_MIN, + GPU_UNITS_RESERVED_MIN, MEM_GPU_RESERVED_MIN)) { + return procs; } - return jobLock; - } + if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { + break; + } - private List dispatchJobs(DispatchHost host, Set jobs) { - List procs = new ArrayList(); + if (getIntProperty("dispatcher.job_lock_expire_seconds") > 0) { + if (getOrCreateJobLock().getIfPresent(jobid) != null) { + continue; + } - try { - for (String jobid: jobs) { - - if (!host.hasAdditionalResources( - CORE_POINTS_RESERVED_MIN, - MEM_RESERVED_MIN, - GPU_UNITS_RESERVED_MIN, - MEM_GPU_RESERVED_MIN)) { - return procs; - } - - if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { - break; - } - - 
if (getIntProperty("dispatcher.job_lock_expire_seconds") > 0) { - if (getOrCreateJobLock().getIfPresent(jobid) != null) { - continue; - } - - jobLock.put(jobid, jobid); - } - - DispatchJob job = jobManager.getDispatchJob(jobid); - try { - procs.addAll(dispatchHost(host, job)); - } - catch (JobDispatchException e) { - logger.info("job dispatch exception," + e); - } - } - - } catch (DispatcherException e) { - logger.info(host.name + " dispatcher exception," + e); + jobLock.put(jobid, jobid); } - host.restoreGpu(); + DispatchJob job = jobManager.getDispatchJob(jobid); + try { + procs.addAll(dispatchHost(host, job)); + } catch (JobDispatchException e) { + logger.info("job dispatch exception," + e); + } + } - return procs; + } catch (DispatcherException e) { + logger.info(host.name + " dispatcher exception," + e); } - private Set getGpuJobs(DispatchHost host, ShowInterface show) { - Set jobs = null; - - // If the host has gpu idle, first do a query to find gpu jobs - // If no gpu jobs found remove resources to leave room for a gpu frame - if (host.hasAdditionalResources( - Dispatcher.CORE_POINTS_RESERVED_DEFAULT, - this.MEM_RESERVED_MIN, - Dispatcher.GPU_UNITS_RESERVED_DEFAULT, - this.MEM_GPU_RESERVED_DEFAULT)) { - if (show == null) - jobs = dispatchSupport.findDispatchJobs(host, - getIntProperty("dispatcher.job_query_max")); - else - jobs = dispatchSupport.findDispatchJobs(host, show, - getIntProperty("dispatcher.job_query_max")); - - if (jobs.size() == 0) { - host.removeGpu(); - jobs = null; - } - } + host.restoreGpu(); - return jobs; - } + return procs; + } + + private Set getGpuJobs(DispatchHost host, ShowInterface show) { + Set jobs = null; - @Override - public List dispatchHostToAllShows(DispatchHost host) { - Set jobs = dispatchSupport.findDispatchJobsForAllShows( - host, - getIntProperty("dispatcher.job_query_max")); + // If the host has gpu idle, first do a query to find gpu jobs + // If no gpu jobs found remove resources to leave room for a gpu frame + if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_DEFAULT, this.MEM_RESERVED_MIN, + Dispatcher.GPU_UNITS_RESERVED_DEFAULT, this.MEM_GPU_RESERVED_DEFAULT)) { + if (show == null) + jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); + else + jobs = dispatchSupport.findDispatchJobs(host, show, + getIntProperty("dispatcher.job_query_max")); - return dispatchJobs(host, jobs); + if (jobs.size() == 0) { + host.removeGpu(); + jobs = null; + } } - @Override - public List dispatchHost(DispatchHost host) { + return jobs; + } - Set jobs = getGpuJobs(host, null); + @Override + public List dispatchHostToAllShows(DispatchHost host) { + Set jobs = dispatchSupport.findDispatchJobsForAllShows(host, + getIntProperty("dispatcher.job_query_max")); - if (jobs == null) - jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); + return dispatchJobs(host, jobs); + } - return dispatchJobs(host, jobs); - } + @Override + public List dispatchHost(DispatchHost host) { - @Override - public List dispatchHost(DispatchHost host, ShowInterface show) { + Set jobs = getGpuJobs(host, null); - Set jobs = getGpuJobs(host, show); + if (jobs == null) + jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); - if (jobs == null) - jobs = dispatchSupport.findDispatchJobs(host, show, - getIntProperty("dispatcher.job_query_max")); + return dispatchJobs(host, jobs); + } - return dispatchJobs(host, jobs); - } + @Override + public List dispatchHost(DispatchHost 
host, ShowInterface show) { - @Override - public List dispatchHost(DispatchHost host, GroupInterface group) { + Set jobs = getGpuJobs(host, show); - Set jobs = getGpuJobs(host, null); + if (jobs == null) + jobs = + dispatchSupport.findDispatchJobs(host, show, getIntProperty("dispatcher.job_query_max")); - if (jobs == null) - jobs = dispatchSupport.findDispatchJobs(host, group); + return dispatchJobs(host, jobs); + } - return dispatchJobs(host, jobs); - } + @Override + public List dispatchHost(DispatchHost host, GroupInterface group) { - @Override - public List dispatchHost(DispatchHost host, JobInterface job) { + Set jobs = getGpuJobs(host, null); - List procs = new ArrayList(); + if (jobs == null) + jobs = dispatchSupport.findDispatchJobs(host, group); - if (host.strandedCores == 0 && - dispatchSupport.isShowAtOrOverBurst(job, host)) { - return procs; - } + return dispatchJobs(host, jobs); + } - List frames = dispatchSupport.findNextDispatchFrames(job, - host, getIntProperty("dispatcher.frame_query_max")); - - logger.info("Frames found: " + frames.size() + " for host " + - host.getName() + " " + host.idleCores + "/" + host.idleMemory + - " on job " + job.getName()); - - String[] selfishServices = env.getProperty("dispatcher.frame.selfish.services", "").split(","); - for (DispatchFrame frame: frames) { - - VirtualProc proc = VirtualProc.build(host, frame, selfishServices); - - if (frame.minCores <= 0 && !proc.canHandleNegativeCoresRequest) { - logger.debug("Cannot dispatch job, host is busy."); - break; - } - - if (host.idleCores < host.handleNegativeCoresRequirement(frame.minCores) || - host.idleMemory < frame.getMinMemory() || - host.idleGpus < frame.minGpus || - host.idleGpuMemory < frame.minGpuMemory) { - logger.debug("Cannot dispatch, insufficient resources."); - break; - } - - if (!dispatchSupport.isJobBookable(job, proc.coresReserved, proc.gpusReserved)) { - break; - } - - - if (host.strandedCores == 0 && - dispatchSupport.isShowAtOrOverBurst(job, host)) { - return procs; - } - - boolean success = new DispatchFrameTemplate(proc, job, frame, false) { - public void wrapDispatchFrame() { - logger.debug("Dispatching frame with " + frame.minCores + " minCores on proc with " + - proc.coresReserved + " coresReserved"); - dispatch(frame, proc); - dispatchSummary(proc, frame, "Booking"); - return; - } - }.execute(); - - if (success) { - procs.add(proc); - - DispatchSupport.bookedProcs.getAndIncrement(); - DispatchSupport.bookedCores.addAndGet(proc.coresReserved); - DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); - - if (host.strandedCores > 0) { - dispatchSupport.pickupStrandedCores(host); - break; - } - - host.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved); - if (!host.hasAdditionalResources( - Dispatcher.CORE_POINTS_RESERVED_MIN, - MEM_RESERVED_MIN, - Dispatcher.GPU_UNITS_RESERVED_MIN, - MEM_GPU_RESERVED_MIN)) { - break; - } - else if (procs.size() >= getIntProperty("dispatcher.job_frame_dispatch_max")) { - break; - } - else if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { - break; - } - } - } + @Override + public List dispatchHost(DispatchHost host, JobInterface job) { - return procs; + List procs = new ArrayList(); + if (host.strandedCores == 0 && dispatchSupport.isShowAtOrOverBurst(job, host)) { + return procs; } - public void dispatchProcToJob(VirtualProc proc, JobInterface job) - { - - // Do not throttle this method - for (DispatchFrame frame: - dispatchSupport.findNextDispatchFrames(job, proc, - 
getIntProperty("dispatcher.frame_query_max"))) { - try { - boolean success = new DispatchFrameTemplate(proc, job, frame, true) { - public void wrapDispatchFrame() { - dispatch(frame, proc); - dispatchSummary(proc, frame, "Dispatch"); - return; - } - }.execute(); - if (success) - return; - } - catch (DispatcherException e) { - return; - } - } + List frames = dispatchSupport.findNextDispatchFrames(job, host, + getIntProperty("dispatcher.frame_query_max")); - dispatchSupport.unbookProc(proc); - } + logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " + + host.idleCores + "/" + host.idleMemory + " on job " + job.getName()); - @Override - public List dispatchHost(DispatchHost host, LayerInterface layer) { - throw new RuntimeException("not implemented)"); - } + String[] selfishServices = env.getProperty("dispatcher.frame.selfish.services", "").split(","); + for (DispatchFrame frame : frames) { - @Override - public List dispatchHost(DispatchHost host, FrameInterface frame) { - throw new RuntimeException("not implemented)"); - } + VirtualProc proc = VirtualProc.build(host, frame, selfishServices); - @Override - public void dispatch(DispatchFrame frame, VirtualProc proc) { - // Allocate frame on the database - dispatchSupport.startFrameAndProc(proc, frame); + if (frame.minCores <= 0 && !proc.canHandleNegativeCoresRequest) { + logger.debug("Cannot dispatch job, host is busy."); + break; + } - // Communicate with RQD to run the frame. - if (!testMode) { - dispatchSupport.runFrame(proc,frame); - } - } + if (host.idleCores < host.handleNegativeCoresRequirement(frame.minCores) + || host.idleMemory < frame.getMinMemory() || host.idleGpus < frame.minGpus + || host.idleGpuMemory < frame.minGpuMemory) { + logger.debug("Cannot dispatch, insufficient resources."); + break; + } - @Override - public boolean isTestMode() { - return testMode; - } + if (!dispatchSupport.isJobBookable(job, proc.coresReserved, proc.gpusReserved)) { + break; + } + if (host.strandedCores == 0 && dispatchSupport.isShowAtOrOverBurst(job, host)) { + return procs; + } + + boolean success = new DispatchFrameTemplate(proc, job, frame, false) { + public void wrapDispatchFrame() { + logger.debug("Dispatching frame with " + frame.minCores + " minCores on proc with " + + proc.coresReserved + " coresReserved"); + dispatch(frame, proc); + dispatchSummary(proc, frame, "Booking"); + return; + } + }.execute(); - @Override - public void setTestMode(boolean enabled) { - testMode = enabled; - dispatchSupport.clearCache(); - } + if (success) { + procs.add(proc); - /** - * Log a summary of each dispatch. 
- * - * @param p the VirtualProc that was used - * @param f the DispatchFrame that that was used - * @param type the type of dispatch - */ - private void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { - String msg = type + " summary: " + - p.coresReserved + - " cores / " + - CueUtil.KbToMb(p.memoryReserved) + - " memory / " + - p.gpusReserved + - " gpus / " + - CueUtil.KbToMb(p.gpuMemoryReserved) + - " gpu memory " + - p.getName() + - " to " + f.show + "/" + f.shot; - logger.trace(msg); - } + DispatchSupport.bookedProcs.getAndIncrement(); + DispatchSupport.bookedCores.addAndGet(proc.coresReserved); + DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); + if (host.strandedCores > 0) { + dispatchSupport.pickupStrandedCores(host); + break; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; + host.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved); + if (!host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, MEM_RESERVED_MIN, + Dispatcher.GPU_UNITS_RESERVED_MIN, MEM_GPU_RESERVED_MIN)) { + break; + } else if (procs.size() >= getIntProperty("dispatcher.job_frame_dispatch_max")) { + break; + } else if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { + break; + } + } } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; + return procs; + + } + + public void dispatchProcToJob(VirtualProc proc, JobInterface job) { + + // Do not throttle this method + for (DispatchFrame frame : dispatchSupport.findNextDispatchFrames(job, proc, + getIntProperty("dispatcher.frame_query_max"))) { + try { + boolean success = new DispatchFrameTemplate(proc, job, frame, true) { + public void wrapDispatchFrame() { + dispatch(frame, proc); + dispatchSummary(proc, frame, "Dispatch"); + return; + } + }.execute(); + if (success) + return; + } catch (DispatcherException e) { + return; + } } - public JobManager getJobManager() { - return jobManager; - } + dispatchSupport.unbookProc(proc); + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + @Override + public List dispatchHost(DispatchHost host, LayerInterface layer) { + throw new RuntimeException("not implemented)"); + } - public HostManager getHostManager() { - return hostManager; - } + @Override + public List dispatchHost(DispatchHost host, FrameInterface frame) { + throw new RuntimeException("not implemented)"); + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + @Override + public void dispatch(DispatchFrame frame, VirtualProc proc) { + // Allocate frame on the database + dispatchSupport.startFrameAndProc(proc, frame); - public RqdClient getRqdClient() { - return rqdClient; + // Communicate with RQD to run the frame. + if (!testMode) { + dispatchSupport.runFrame(proc, frame); } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; + } + + @Override + public boolean isTestMode() { + return testMode; + } + + @Override + public void setTestMode(boolean enabled) { + testMode = enabled; + dispatchSupport.clearCache(); + } + + /** + * Log a summary of each dispatch. 
+ * + * @param p the VirtualProc that was used + * @param f the DispatchFrame that that was used + * @param type the type of dispatch + */ + private void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { + String msg = + type + " summary: " + p.coresReserved + " cores / " + CueUtil.KbToMb(p.memoryReserved) + + " memory / " + p.gpusReserved + " gpus / " + CueUtil.KbToMb(p.gpuMemoryReserved) + + " gpu memory " + p.getName() + " to " + f.show + "/" + f.shot; + logger.trace(msg); + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + private abstract class DispatchFrameTemplate { + protected VirtualProc proc; + protected JobInterface job; + protected DispatchFrame frame; + boolean procIndb = true; + + public DispatchFrameTemplate(VirtualProc p, JobInterface j, DispatchFrame f, boolean inDb) { + proc = p; + job = j; + frame = f; + procIndb = inDb; } - private abstract class DispatchFrameTemplate { - protected VirtualProc proc; - protected JobInterface job; - protected DispatchFrame frame; - boolean procIndb = true; - - public DispatchFrameTemplate(VirtualProc p, - JobInterface j, - DispatchFrame f, - boolean inDb) { - proc = p; - job = j; - frame = f; - procIndb = inDb; + public abstract void wrapDispatchFrame(); + + public boolean execute() { + try { + wrapDispatchFrame(); + } catch (FrameReservationException fre) { + /* + * This usually just means another thread got the frame first, so just retry on the next + * frame. + */ + DispatchSupport.bookingRetries.incrementAndGet(); + String msg = + "frame reservation error, " + "dispatchProcToJob failed to book next frame, " + fre; + logger.info(msg); + return false; + } catch (ResourceDuplicationFailureException rrfe) { + /* + * There is a resource already assigned to the frame we reserved! Don't clear the frame, let + * it keep running and continue to the next frame. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.fixFrame(frame); + + String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + job + + ", " + proc + " already assigned to another frame." + rrfe; + + logger.info(msg); + return false; + } catch (ResourceReservationFailureException rrfe) { + /* + * This should technically never happen since the proc is already allocated at this point, + * but, if it does it should be unbooked. 
+ */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "proc update error, " + "dispatchProcToJob failed to assign proc to job " + job + + ", " + rrfe; + logger.info(msg); + if (procIndb) { + dispatchSupport.unbookProc(proc); } + dispatchSupport.clearFrame(frame); + /* Throw an exception to stop booking **/ + throw new DispatcherException( + "host reservation error, " + "dispatchHostToJob failed to allocate a new proc " + rrfe); + } catch (Exception e) { + /* + * Everything else means that the host/frame record was updated but another error occurred + * and the proc should be cleared. It could also be running, so use the jobManagerSupprot to + * kill it just in case. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "dispatchProcToJob failed booking proc " + proc + " on job " + job; + logger.info(msg, e); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); - public abstract void wrapDispatchFrame(); - - public boolean execute() { - try { - wrapDispatchFrame(); - } catch (FrameReservationException fre) { - /* - * This usually just means another thread got the frame - * first, so just retry on the next frame. - */ - DispatchSupport.bookingRetries.incrementAndGet(); - String msg = "frame reservation error, " + - "dispatchProcToJob failed to book next frame, " + fre; - logger.info(msg); - return false; - } - catch (ResourceDuplicationFailureException rrfe) { - /* - * There is a resource already assigned to the - * frame we reserved! Don't clear the frame, - * let it keep running and continue to the - * next frame. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.fixFrame(frame); - - String msg = "proc update error, dispatchProcToJob failed " + - "to assign proc to job " + job + ", " + proc + - " already assigned to another frame." + rrfe; - - logger.info(msg); - return false; - } - catch (ResourceReservationFailureException rrfe) { - /* - * This should technically never happen since the proc - * is already allocated at this point, but, if it does - * it should be unbooked. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = "proc update error, " + - "dispatchProcToJob failed to assign proc to job " + - job + ", " + rrfe; - logger.info(msg); - if (procIndb) { - dispatchSupport.unbookProc(proc); - } - dispatchSupport.clearFrame(frame); - /* Throw an exception to stop booking **/ - throw new DispatcherException("host reservation error, " + - "dispatchHostToJob failed to allocate a new proc " + rrfe); - } - catch (Exception e) { - /* - * Everything else means that the host/frame record was - * updated but another error occurred and the proc - * should be cleared. It could also be running, so - * use the jobManagerSupprot to kill it just in case. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = "dispatchProcToJob failed booking proc " + - proc + " on job " + job; - logger.info(msg, e); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - try { - rqdClient.killFrame(proc, "An accounting error occured " + - "when booking this frame."); - } catch (RqdClientException rqde) { - /* - * Its almost expected that this will fail, as this is - * just a precaution if the frame did actually launch. 
- */ - } - /* Thrown an exception to stop booking */ - throw new DispatcherException( - "stopped dispatching host, " + e); - } - - return true; + try { + rqdClient.killFrame(proc, "An accounting error occured " + "when booking this frame."); + } catch (RqdClientException rqde) { + /* + * Its almost expected that this will fail, as this is just a precaution if the frame did + * actually launch. + */ } + /* Thrown an exception to stop booking */ + throw new DispatcherException("stopped dispatching host, " + e); + } + + return true; } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java index fdd66dfb2..25097de7a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import java.util.concurrent.atomic.AtomicBoolean; @@ -28,81 +24,75 @@ public class DispatchQueue implements QueueHealthCheck { - private int healthThreshold; - private int minUnhealthyPeriodMin; - private int queueCapacity; - private int corePoolSize; - private int maxPoolSize; - - private static final Logger logger = LogManager.getLogger("HEALTH"); - private String name = "Default"; - private HealthyThreadPool healthyDispatchPool; - - public DispatchQueue(String name, int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, - int corePoolSize, int maxPoolSize) { - this.name = name; - this.healthThreshold = healthThreshold; - this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; - this.queueCapacity = queueCapacity; - this.corePoolSize = corePoolSize; - this.maxPoolSize = maxPoolSize; + private int healthThreshold; + private int minUnhealthyPeriodMin; + private int queueCapacity; + private int corePoolSize; + private int maxPoolSize; + + private static final Logger logger = LogManager.getLogger("HEALTH"); + private String name = "Default"; + private HealthyThreadPool healthyDispatchPool; + + public DispatchQueue(String name, int healthThreshold, int minUnhealthyPeriodMin, + int queueCapacity, int corePoolSize, int maxPoolSize) { + this.name = name; + this.healthThreshold = healthThreshold; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.queueCapacity = queueCapacity; + this.corePoolSize = corePoolSize; + this.maxPoolSize = maxPoolSize; + initThreadPool(); + } + + public void initThreadPool() { + healthyDispatchPool = new HealthyThreadPool(name, healthThreshold, minUnhealthyPeriodMin, + queueCapacity, corePoolSize, maxPoolSize); + } + + public void shutdownUnhealthy() { + try { + if (!healthyDispatchPool.shutdownUnhealthy()) { + logger.warn("DispatchQueue_" + name + ": Unhealthy queue terminated, starting a new one"); initThreadPool(); + } + } catch (InterruptedException e) { + // TODO: evaluate crashing the whole springbook context here + // to force a container restart cycle + logger.error("DispatchQueue_" + name + ":Failed to restart DispatchThreadPool", e); } + } - public void initThreadPool() { - healthyDispatchPool = new HealthyThreadPool( - name, - healthThreshold, - minUnhealthyPeriodMin, - queueCapacity, - corePoolSize, - maxPoolSize); - } + public boolean isHealthy() { + return healthyDispatchPool.healthCheck(); + } - public void shutdownUnhealthy() { - try { - if (!healthyDispatchPool.shutdownUnhealthy()) { - logger.warn("DispatchQueue_" + name + ": Unhealthy queue terminated, starting a new one"); - initThreadPool(); - } - } catch (InterruptedException e) { - // TODO: evaluate crashing the whole springbook context here - // to force a container restart cycle - logger.error("DispatchQueue_" + name + ":Failed to restart DispatchThreadPool", e); - } - } + public void execute(KeyRunnable r) { + healthyDispatchPool.execute(r); + } - public boolean isHealthy() { - return healthyDispatchPool.healthCheck(); - } + public long getRejectedTaskCount() { + return healthyDispatchPool.getRejectedTaskCount(); + } - public void execute(KeyRunnable r) { - healthyDispatchPool.execute(r); - } + public void shutdown() { + healthyDispatchPool.shutdown(); + } - public long getRejectedTaskCount() { - return healthyDispatchPool.getRejectedTaskCount(); - } + public int getSize() { + return healthyDispatchPool.getQueue().size(); + } - public void shutdown() { - healthyDispatchPool.shutdown(); - } + public int getRemainingCapacity() { + 
return healthyDispatchPool.getQueue().remainingCapacity(); + } - public int getSize() { - return healthyDispatchPool.getQueue().size(); - } - - public int getRemainingCapacity() { - return healthyDispatchPool.getQueue().remainingCapacity(); - } + public int getActiveCount() { + return healthyDispatchPool.getActiveCount(); + } - public int getActiveCount() { - return healthyDispatchPool.getActiveCount(); - } - - public long getCompletedTaskCount() { - return healthyDispatchPool.getCompletedTaskCount(); - } + public long getCompletedTaskCount() { + return healthyDispatchPool.getCompletedTaskCount(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java index 1c3172d3b..f7d51442b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class DispatchQueueTaskRejectionException extends SpcueRuntimeException { - public DispatchQueueTaskRejectionException() { - // TODO Auto-generated constructor stub - } + public DispatchQueueTaskRejectionException() { + // TODO Auto-generated constructor stub + } - public DispatchQueueTaskRejectionException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public DispatchQueueTaskRejectionException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public DispatchQueueTaskRejectionException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public DispatchQueueTaskRejectionException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public DispatchQueueTaskRejectionException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public DispatchQueueTaskRejectionException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java index 106d413ce..2df583109 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import java.util.List; @@ -46,512 +42,478 @@ */ public interface DispatchSupport { - /** - * Number of procs rebalanced - */ - static final AtomicLong balanceSuccess = new AtomicLong(0); - - /** - * Number of procs - */ - static final AtomicLong balanceFailed = new AtomicLong(0); - - /** - * Number of times the worst offender was killed - */ - static final AtomicLong killedOffenderProcs = new AtomicLong(0); - - /** - * Number of frames killed because the machine totally ran out of memory. - * This may or may not be the worst offender. 
- */ - static final AtomicLong killedOomProcs= new AtomicLong(0); - - /** - * Long for counting how many procs have been dispatched - */ - static final AtomicLong dispatchedProcs = new AtomicLong(0); - - /** - * Long for counting how many cores have been booked - */ - static final AtomicLong bookedCores = new AtomicLong(0); - - /** - * Long for counting how many gpus have been booked - */ - static final AtomicLong bookedGpus = new AtomicLong(0); - - /** - * Long for counting how many procs have been booked - */ - static final AtomicLong bookedProcs = new AtomicLong(0); - - /** - * Long for counting unbooked procs. - */ - static final AtomicLong unbookedProcs = new AtomicLong(0); - - /** - * A proc is cleared when an error occurs - */ - static final AtomicLong clearedProcs = new AtomicLong(0); - - /** - * Long for counting dispatch errors - */ - static final AtomicLong bookingErrors = new AtomicLong(0); - - /** - * Long for counting dispatch retries - */ - static final AtomicLong bookingRetries = new AtomicLong(0); - - /** - * Incremented when RQD and the Cue DB are out of sync. - */ - static final AtomicLong accountingErrors = new AtomicLong(0); - - /** - * Incremented when RQD and the Cue DB are out of sync. - */ - static final AtomicLong fixedFrames = new AtomicLong(0); - - /** - * Count number of picked up cores. - */ - static final AtomicLong pickedUpCoresCount = new AtomicLong(0); - - /** - * Count number of stranded cores. - */ - static final AtomicLong strandedCoresCount = new AtomicLong(0); - - /** - * Count number of picked up gpus. - */ - static final AtomicLong pickedUpGpusCount = new AtomicLong(0); - - /** - * Count number of stranded gpus. - */ - static final AtomicLong strandedGpusCount = new AtomicLong(0); - - /** - * Set the proc's frame assignment to null; - * - * @param proc - * @return - */ - boolean clearVirtualProcAssignement(ProcInterface proc); - - /** - * Stops the specified frame and sets a new frame state - * and exit status. - * - * @param frame - * @param state - * @param exitStatus - */ - boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus); - - /** - * Updates a frame with completed stats. - * - * @param frame - * @param state - * @param exitStatus - * @param maxrss - * @return - */ - boolean stopFrame(FrameInterface frame, FrameState state, - int exitStatus, long maxrss); - - /** - * Updates the frame to the Running state and Reserve the resources - * in the specified proc for the specified frame. If the proc does - * not exist, its inserted, otherwise its updated. - * - * When a proc is created, the subscription, host, - * job, layer, folder, and shot proc counts get updated. - * This may cause some contention. - * - * @param proc - * @param frame - */ - public void startFrameAndProc(VirtualProc proc, DispatchFrame frame); - - /** - * This method clears out a proc that was lost track of. - * This can happen if the host fails and the proc fails - * to report in, a network outage occurs, or something - * of that nature. - * - * @param proc - * @param reason - * @param exitStatus - */ - void lostProc(VirtualProc proc, String reason, int exitStatus); - - /** - * Unbooks a proc with no message - * - * @param proc - */ - void unbookProc(VirtualProc proc); - - /** - * Unbooks a virtual proc. Takes a reason which is - * printed to the console. - */ - void unbookProc(VirtualProc proc, String reason); - - /** - * Returns the next N frames to be dispatched from the - * specified job. 
- * - * @param job - * @param proc - * @param limit - * @return - */ - List findNextDispatchFrames(JobInterface job, - VirtualProc proc, int limit); - - /** - * - * Returns the next N frames to be dispatched from the - * specified job. - * - * @param job - * @param host - * @param limit - * @return - */ - List findNextDispatchFrames(JobInterface job, DispatchHost host, - int limit); - - /** - * Return the next N frames to be dispatched from the specified layer. - * - * @param layer - * @param host - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, DispatchHost host, - int limit); - - /** - * Return the next N frames to be dispatched from the specified layer. - * - * @param layer - * @param proc - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, - int limit); - /** - * - * @param excludeJob - * @param proc - * @return - */ - boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); - - /** - * Return true if there are higher priority jobs to run. - * - * @param baseJob - * @param proc - * @return boolean - */ - boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); - - /** - * Run the frame on the specified proc. - * - * @param proc - * @param frame - * @throws DispatcherException if an error occurs during dispatching - */ - void runFrame(VirtualProc proc, DispatchFrame frame); - - /** - * Return true if the specified show is over its burst - * size of the given proc's allocation. - * - * @param proc - * @return - */ - boolean isShowOverBurst(VirtualProc proc); - - - /** - * Returns the job that can utilize the specified host. - * - * @param host - * @return - */ - Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); - - /** - * Returns the highest priority job that can utilize - * the specified host - * - * @param host - * @return - */ - Set findDispatchJobs(DispatchHost host, int numJobs); - - /** - * Returns the highest priority jobs that can utilize - * the specified host in the specified group. - * - * @param host - * @return A set of unique job ids. - */ - Set findDispatchJobs(DispatchHost host, GroupInterface p); - - /** - * - * @param host - * @return A set of unique job ids. - */ - Set findLocalDispatchJobs(DispatchHost host); - - /** - * Creates and returns and RQD RunFrame object. - * - * Once the RunFrame object is created string replacement is done - * in the frame command to replace any tags in the command. - * - * Currently these tags are supported: [IFRAME] - integer frame (no padding) - * [ZFRAME] - 4 padded frame [LAYER] - the layer name [JOB] - the job name - * [IFRAME] - the full frame name - * [JOB] - the job name - * [LAYER] - the layer name - * - * @param proc - * @param frame - * @return RunFrame - */ - RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame); - - - - /** - * Checks to see if job passes basic tests for dispatchability. - * Tests include if the proc is over its max, if it has pending - * frames, and if its paused. - * - * @param job - * @param local indicates a local dispatch or not - * @return boolean - */ - boolean isJobDispatchable(JobInterface job, boolean local); - - /** - * returns true of the cue has jobs with pending frames - * that are not paused or in a non bookable state. - * - * @return - */ - boolean isCueBookable(FacilityInterface f); - - /** - * Increases the amount of memory reserved for a running frame. - * Returns true if the memory value actually increased. 
If the value - * is lower than current reserved memory it is ignored. - * - * @param proc - * @param value - */ - boolean increaseReservedMemory(ProcInterface proc, long value); - - /** - * Attempts to balance the reserved memory on a proc by - * taking away reserved memory from frames that are well under - * their reservation. - * - * @param proc - * @param value - */ - boolean balanceReservedMemory(ProcInterface proc, long value); - - /** - * Update the jobs usage counters. - * - * @param frame - * @param exitStatus - */ - void updateUsageCounters(FrameInterface frame, int exitStatus); - - /** - * Sets a frame to running if there is a proc with the frame. - * - * @param frame - */ - void fixFrame(DispatchFrame frame); - - /** - * Sets the frame state to waiting for a frame with - * no running proc. - * - * @param frame - */ - void clearFrame(DispatchFrame frame); - - /** - * Sets the frame state exitStatus to EXIT_STATUS_MEMORY_FAILURE - * - * @param frame - * @return whether the frame has been updated - */ - boolean updateFrameMemoryError(FrameInterface frame); - - /** - * Update Memory usage data and LLU time for the given frame. - * - * @param frame - * @param rss - * @param maxRss - * @param lluTime - */ - void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, - long lluTime); - - /** - * Update memory usage data for a given frame's proc record. The - * frame is used to update the proc so the update fails if the proc - * has been rebooked onto a new frame. - * - * @param frame - * @param rss - * @param maxRss - * @param vsize - * @param maxVsize - * @param usedGpuMemory - * @param maxUsedGpuMemory - */ - void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, - long vsize, long maxVsize, long usedGpuMemory, - long maxUsedGpuMemory, long usedSwapMemory, - byte[] children); - - /** - * Return true if adding the given core units would put the show - * over its burst value. - * - * @param show - * @param alloc - * @param coreUnits - * @return - */ - boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); - - /** - * Return true if the job can take new procs. - * - * @param job - * @return - */ - boolean isJobBookable(JobInterface job); - - /** - * Return true if the job can take the given number of new core units. - * - * @param job - * @return - */ - boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits); - - /** - * Return true if the specified show is at or over its - * burst value for the given allocation. - * - * @param show - * @param alloc - * @return - */ - boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); - - /** - * Return true if the specified show is over its - * guaranteed subscription size. - * - * @param proc - * @return - */ - boolean isShowOverSize(VirtualProc proc); - - /** - * Pickup any cores that were stranded on the given host. - * - * @param host - */ - void pickupStrandedCores(DispatchHost host); - - /** - * Return true if the host has stranded cores. - * - * @param host - * @return - */ - boolean hasStrandedCores(HostInterface host); - - /** - * Add stranded cores for the given host. Stranded - * cores will automatically be added to the next frame dispatched - * from the host to make up for cores stranded with no memory. - * - * @param host - * @param cores - */ - void strandCores(DispatchHost host, int cores); - - /** - * Lowers the perceived idle cores on a machine if - * the load is over certain threshold. 
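The determineIdleCores calculation appears in the service implementation further down; the fragment below just walks through that arithmetic with made-up numbers.

    // Worked example of determineIdleCores() with made-up values.
    int cores = 800;                                                        // host.cores
    int load = 700;                                                         // reported load
    int maxLoad = cores + ((cores / 100) * Dispatcher.CORE_LOAD_THRESHOLD); // 800 + 8 * 5 = 840
    int idleCores = maxLoad - load;                                         // 140
    // If host.idleCores is currently above 140, determineIdleCores() lowers it to 140.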
- * - * @param host - * @param load - */ - void determineIdleCores(DispatchHost host, int load); - - /** - * Return a set of job IDs that can take the given host. - * - * @param host - * @param show - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); - - /** - * Return true of the job has pending frames. - * - * @param job - * @return - */ - boolean hasPendingFrames(JobInterface job); - - /** - * Return true if the layer has pending frames. - * - * @param layer - * @return - */ - boolean hasPendingFrames(LayerInterface layer); - - /** - * Clear bookableShows cache - * - * @return - */ - void clearCache(); + /** + * Number of procs rebalanced + */ + static final AtomicLong balanceSuccess = new AtomicLong(0); + + /** + * Number of procs + */ + static final AtomicLong balanceFailed = new AtomicLong(0); + + /** + * Number of times the worst offender was killed + */ + static final AtomicLong killedOffenderProcs = new AtomicLong(0); + + /** + * Number of frames killed because the machine totally ran out of memory. This may or may not be + * the worst offender. + */ + static final AtomicLong killedOomProcs = new AtomicLong(0); + + /** + * Long for counting how many procs have been dispatched + */ + static final AtomicLong dispatchedProcs = new AtomicLong(0); + + /** + * Long for counting how many cores have been booked + */ + static final AtomicLong bookedCores = new AtomicLong(0); + + /** + * Long for counting how many gpus have been booked + */ + static final AtomicLong bookedGpus = new AtomicLong(0); + + /** + * Long for counting how many procs have been booked + */ + static final AtomicLong bookedProcs = new AtomicLong(0); + + /** + * Long for counting unbooked procs. + */ + static final AtomicLong unbookedProcs = new AtomicLong(0); + + /** + * A proc is cleared when an error occurs + */ + static final AtomicLong clearedProcs = new AtomicLong(0); + + /** + * Long for counting dispatch errors + */ + static final AtomicLong bookingErrors = new AtomicLong(0); + + /** + * Long for counting dispatch retries + */ + static final AtomicLong bookingRetries = new AtomicLong(0); + + /** + * Incremented when RQD and the Cue DB are out of sync. + */ + static final AtomicLong accountingErrors = new AtomicLong(0); + + /** + * Incremented when RQD and the Cue DB are out of sync. + */ + static final AtomicLong fixedFrames = new AtomicLong(0); + + /** + * Count number of picked up cores. + */ + static final AtomicLong pickedUpCoresCount = new AtomicLong(0); + + /** + * Count number of stranded cores. + */ + static final AtomicLong strandedCoresCount = new AtomicLong(0); + + /** + * Count number of picked up gpus. + */ + static final AtomicLong pickedUpGpusCount = new AtomicLong(0); + + /** + * Count number of stranded gpus. + */ + static final AtomicLong strandedGpusCount = new AtomicLong(0); + + /** + * Set the proc's frame assignment to null; + * + * @param proc + * @return + */ + boolean clearVirtualProcAssignement(ProcInterface proc); + + /** + * Stops the specified frame and sets a new frame state and exit status. + * + * @param frame + * @param state + * @param exitStatus + */ + boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus); + + /** + * Updates a frame with completed stats. 
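stopFrame and updateUsageCounters pair up when a frame finishes: the frame is stopped with its final state and exit status, and only a successful stop rolls usage into the show, job, and layer counters. The sketch below is a simplified, hypothetical completion path (the real caller is not part of this diff); how the final FrameState is chosen is out of scope here.

    // Hypothetical completion path; the surrounding handler is assumed, not shown in this patch.
    void onFrameFinished(DispatchSupport dispatchSupport, FrameInterface frame,
            FrameState finalState, int exitStatus, long maxRss) {
        if (dispatchSupport.stopFrame(frame, finalState, exitStatus, maxRss)) {
            // Usage counters are only updated for frames that were actually stopped.
            dispatchSupport.updateUsageCounters(frame, exitStatus);
        }
    }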
+ * + * @param frame + * @param state + * @param exitStatus + * @param maxrss + * @return + */ + boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus, long maxrss); + + /** + * Updates the frame to the Running state and Reserve the resources in the specified proc for the + * specified frame. If the proc does not exist, its inserted, otherwise its updated. + * + * When a proc is created, the subscription, host, job, layer, folder, and shot proc counts get + * updated. This may cause some contention. + * + * @param proc + * @param frame + */ + public void startFrameAndProc(VirtualProc proc, DispatchFrame frame); + + /** + * This method clears out a proc that was lost track of. This can happen if the host fails and the + * proc fails to report in, a network outage occurs, or something of that nature. + * + * @param proc + * @param reason + * @param exitStatus + */ + void lostProc(VirtualProc proc, String reason, int exitStatus); + + /** + * Unbooks a proc with no message + * + * @param proc + */ + void unbookProc(VirtualProc proc); + + /** + * Unbooks a virtual proc. Takes a reason which is printed to the console. + */ + void unbookProc(VirtualProc proc, String reason); + + /** + * Returns the next N frames to be dispatched from the specified job. + * + * @param job + * @param proc + * @param limit + * @return + */ + List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); + + /** + * + * Returns the next N frames to be dispatched from the specified job. + * + * @param job + * @param host + * @param limit + * @return + */ + List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); + + /** + * Return the next N frames to be dispatched from the specified layer. + * + * @param layer + * @param host + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); + + /** + * Return the next N frames to be dispatched from the specified layer. + * + * @param layer + * @param proc + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, int limit); + + /** + * + * @param excludeJob + * @param proc + * @return + */ + boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); + + /** + * Return true if there are higher priority jobs to run. + * + * @param baseJob + * @param proc + * @return boolean + */ + boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); + + /** + * Run the frame on the specified proc. + * + * @param proc + * @param frame + * @throws DispatcherException if an error occurs during dispatching + */ + void runFrame(VirtualProc proc, DispatchFrame frame); + + /** + * Return true if the specified show is over its burst size of the given proc's allocation. + * + * @param proc + * @return + */ + boolean isShowOverBurst(VirtualProc proc); + + /** + * Returns the job that can utilize the specified host. + * + * @param host + * @return + */ + Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); + + /** + * Returns the highest priority job that can utilize the specified host + * + * @param host + * @return + */ + Set findDispatchJobs(DispatchHost host, int numJobs); + + /** + * Returns the highest priority jobs that can utilize the specified host in the specified group. + * + * @param host + * @return A set of unique job ids. + */ + Set findDispatchJobs(DispatchHost host, GroupInterface p); + + /** + * + * @param host + * @return A set of unique job ids. 
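lostProc is the cleanup hook for procs the cue lost track of, for example when a host stops reporting. A rough fragment of a caller, assuming the dead-host detection and the proc lookup happen elsewhere; EXIT_STATUS_DOWN_HOST is the Dispatcher constant this patch documents for that case further down.

    // Hypothetical caller: clear procs whose host has gone silent.
    void clearProcsForDownHost(DispatchSupport dispatchSupport, List<VirtualProc> procsOnHost) {
        for (VirtualProc proc : procsOnHost) {
            dispatchSupport.lostProc(proc, "host stopped reporting",
                    Dispatcher.EXIT_STATUS_DOWN_HOST);
        }
    }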
+ */ + Set findLocalDispatchJobs(DispatchHost host); + + /** + * Creates and returns and RQD RunFrame object. + * + * Once the RunFrame object is created string replacement is done in the frame command to replace + * any tags in the command. + * + * Currently these tags are supported: [IFRAME] - integer frame (no padding) [ZFRAME] - 4 padded + * frame [LAYER] - the layer name [JOB] - the job name [IFRAME] - the full frame name [JOB] - the + * job name [LAYER] - the layer name + * + * @param proc + * @param frame + * @return RunFrame + */ + RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame); + + /** + * Checks to see if job passes basic tests for dispatchability. Tests include if the proc is over + * its max, if it has pending frames, and if its paused. + * + * @param job + * @param local indicates a local dispatch or not + * @return boolean + */ + boolean isJobDispatchable(JobInterface job, boolean local); + + /** + * returns true of the cue has jobs with pending frames that are not paused or in a non bookable + * state. + * + * @return + */ + boolean isCueBookable(FacilityInterface f); + + /** + * Increases the amount of memory reserved for a running frame. Returns true if the memory value + * actually increased. If the value is lower than current reserved memory it is ignored. + * + * @param proc + * @param value + */ + boolean increaseReservedMemory(ProcInterface proc, long value); + + /** + * Attempts to balance the reserved memory on a proc by taking away reserved memory from frames + * that are well under their reservation. + * + * @param proc + * @param value + */ + boolean balanceReservedMemory(ProcInterface proc, long value); + + /** + * Update the jobs usage counters. + * + * @param frame + * @param exitStatus + */ + void updateUsageCounters(FrameInterface frame, int exitStatus); + + /** + * Sets a frame to running if there is a proc with the frame. + * + * @param frame + */ + void fixFrame(DispatchFrame frame); + + /** + * Sets the frame state to waiting for a frame with no running proc. + * + * @param frame + */ + void clearFrame(DispatchFrame frame); + + /** + * Sets the frame state exitStatus to EXIT_STATUS_MEMORY_FAILURE + * + * @param frame + * @return whether the frame has been updated + */ + boolean updateFrameMemoryError(FrameInterface frame); + + /** + * Update Memory usage data and LLU time for the given frame. + * + * @param frame + * @param rss + * @param maxRss + * @param lluTime + */ + void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, long lluTime); + + /** + * Update memory usage data for a given frame's proc record. The frame is used to update the proc + * so the update fails if the proc has been rebooked onto a new frame. + * + * @param frame + * @param rss + * @param maxRss + * @param vsize + * @param maxVsize + * @param usedGpuMemory + * @param maxUsedGpuMemory + */ + void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, long maxVsize, + long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children); + + /** + * Return true if adding the given core units would put the show over its burst value. + * + * @param show + * @param alloc + * @param coreUnits + * @return + */ + boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); + + /** + * Return true if the job can take new procs. + * + * @param job + * @return + */ + boolean isJobBookable(JobInterface job); + + /** + * Return true if the job can take the given number of new core units. 
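The tag expansion described above is done with plain String.replaceAll calls in the implementation further down, using tokens of the form #IFRAME#, #ZFRAME#, #LAYER#, #JOB# and so on. A worked fragment with made-up values:

    // Worked example of the command token expansion; the command and values are made up.
    String command = "render -f #IFRAME# -padded #ZFRAME# -layer #LAYER# -job #JOB#";
    int frameNumber = 12;
    String zFrameNumber = String.format("%04d", frameNumber); // "0012"
    String expanded = command
            .replaceAll("#IFRAME#", String.valueOf(frameNumber))
            .replaceAll("#ZFRAME#", zFrameNumber)
            .replaceAll("#LAYER#", "comp")
            .replaceAll("#JOB#", "myshow-sh010-user_render_v001");
    // expanded == "render -f 12 -padded 0012 -layer comp -job myshow-sh010-user_render_v001"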
+ * + * @param job + * @return + */ + boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits); + + /** + * Return true if the specified show is at or over its burst value for the given allocation. + * + * @param show + * @param alloc + * @return + */ + boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); + + /** + * Return true if the specified show is over its guaranteed subscription size. + * + * @param proc + * @return + */ + boolean isShowOverSize(VirtualProc proc); + + /** + * Pickup any cores that were stranded on the given host. + * + * @param host + */ + void pickupStrandedCores(DispatchHost host); + + /** + * Return true if the host has stranded cores. + * + * @param host + * @return + */ + boolean hasStrandedCores(HostInterface host); + + /** + * Add stranded cores for the given host. Stranded cores will automatically be added to the next + * frame dispatched from the host to make up for cores stranded with no memory. + * + * @param host + * @param cores + */ + void strandCores(DispatchHost host, int cores); + + /** + * Lowers the perceived idle cores on a machine if the load is over certain threshold. + * + * @param host + * @param load + */ + void determineIdleCores(DispatchHost host, int load); + + /** + * Return a set of job IDs that can take the given host. + * + * @param host + * @param show + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); + + /** + * Return true of the job has pending frames. + * + * @param job + * @return + */ + boolean hasPendingFrames(JobInterface job); + + /** + * Return true if the layer has pending frames. + * + * @param layer + * @return + */ + boolean hasPendingFrames(LayerInterface layer); + + /** + * Clear bookableShows cache + * + * @return + */ + void clearCache(); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java index 80a1ff362..74c3d212b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import java.util.List; @@ -66,656 +62,617 @@ @Transactional(propagation = Propagation.REQUIRED) public class DispatchSupportService implements DispatchSupport { - private static final Logger logger = LogManager.getLogger(DispatchSupportService.class); - - private JobDao jobDao; - private FrameDao frameDao; - private LayerDao layerDao; - private ProcDao procDao; - private HostDao hostDao; - private ShowDao showDao; - private DispatcherDao dispatcherDao; - private DependManager dependManager; - private SubscriptionDao subscriptionDao; - private RqdClient rqdClient; - private RedirectManager redirectManager; - private BookingManager bookingManager; - private BookingDao bookingDao; - - private ConcurrentHashMap strandedCores = - new ConcurrentHashMap(); - - @Override - public void pickupStrandedCores(DispatchHost host) { - logger.info(host + "picked up stranded cores"); - pickedUpCoresCount.getAndIncrement(); - strandedCores.remove(host.getHostId()); - } - - @Override - public boolean hasStrandedCores(HostInterface host) { - StrandedCores stranded = strandedCores.get(host.getHostId()); - if (stranded == null) { - return false; - } - if (stranded.isExpired()) { - return false; - } - - return true; - } - - @Override - public void strandCores(DispatchHost host, int cores) { - logger.info(host + " found " + cores + ", stranded cores"); - host.strandedCores = cores; - if (host.threadMode != ThreadMode.VARIABLE.getNumber()) { - host.threadMode = ThreadMode.ALL.getNumber(); - } - strandedCores.putIfAbsent(host.getHostId(), new StrandedCores(cores)); - strandedCoresCount.getAndIncrement(); - } - - @Transactional(readOnly = true) - public List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit) { - return dispatcherDao.findNextDispatchFrames(job, proc, limit); - } - - @Transactional(readOnly = true) - public List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit) { - return dispatcherDao.findNextDispatchFrames(job, host, limit); - } - - @Override - @Transactional(readOnly = true) - public List findNextDispatchFrames(LayerInterface layer, - DispatchHost host, int limit) { - return dispatcherDao.findNextDispatchFrames(layer, host, limit); - } - - @Override - @Transactional(readOnly = true) - public List findNextDispatchFrames(LayerInterface layer, - VirtualProc proc, int limit) { - return dispatcherDao.findNextDispatchFrames(layer, proc, limit); - } + private static final Logger logger = LogManager.getLogger(DispatchSupportService.class); + + private JobDao jobDao; + private FrameDao frameDao; + private LayerDao layerDao; + private ProcDao procDao; + private HostDao hostDao; + private ShowDao showDao; + private DispatcherDao dispatcherDao; + private DependManager dependManager; + private SubscriptionDao subscriptionDao; + private RqdClient rqdClient; + private RedirectManager redirectManager; + private BookingManager bookingManager; + private BookingDao bookingDao; + + private ConcurrentHashMap strandedCores = + new ConcurrentHashMap(); + + @Override + public void pickupStrandedCores(DispatchHost host) { + logger.info(host + "picked up stranded cores"); + pickedUpCoresCount.getAndIncrement(); + strandedCores.remove(host.getHostId()); + } + + @Override + public boolean hasStrandedCores(HostInterface host) { + StrandedCores stranded = strandedCores.get(host.getHostId()); + if (stranded == null) { + return false; + } + if (stranded.isExpired()) { + return false; + } + + return true; + } + + @Override + public void 
strandCores(DispatchHost host, int cores) { + logger.info(host + " found " + cores + ", stranded cores"); + host.strandedCores = cores; + if (host.threadMode != ThreadMode.VARIABLE.getNumber()) { + host.threadMode = ThreadMode.ALL.getNumber(); + } + strandedCores.putIfAbsent(host.getHostId(), new StrandedCores(cores)); + strandedCoresCount.getAndIncrement(); + } + + @Transactional(readOnly = true) + public List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit) { + return dispatcherDao.findNextDispatchFrames(job, proc, limit); + } + + @Transactional(readOnly = true) + public List findNextDispatchFrames(JobInterface job, DispatchHost host, + int limit) { + return dispatcherDao.findNextDispatchFrames(job, host, limit); + } + + @Override + @Transactional(readOnly = true) + public List findNextDispatchFrames(LayerInterface layer, DispatchHost host, + int limit) { + return dispatcherDao.findNextDispatchFrames(layer, host, limit); + } + + @Override + @Transactional(readOnly = true) + public List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, + int limit) { + return dispatcherDao.findNextDispatchFrames(layer, proc, limit); + } + + @Transactional(readOnly = true) + public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { + return dispatcherDao.findUnderProcedJob(excludeJob, proc); + } + + @Transactional(readOnly = true) + public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { + return dispatcherDao.higherPriorityJobExists(baseJob, proc); + } + + @Transactional(readOnly = true) + public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { + return dispatcherDao.findDispatchJobsForAllShows(host, numJobs); + } + + @Transactional(readOnly = true) + public Set findDispatchJobs(DispatchHost host, int numJobs) { + return dispatcherDao.findDispatchJobs(host, numJobs); + } + + @Transactional(readOnly = true) + public Set findDispatchJobs(DispatchHost host, GroupInterface g) { + return dispatcherDao.findDispatchJobs(host, g); + } + + @Override + @Transactional(readOnly = true) + public Set findLocalDispatchJobs(DispatchHost host) { + return dispatcherDao.findLocalDispatchJobs(host); + } + + @Override + @Transactional(readOnly = true) + public Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { + return dispatcherDao.findDispatchJobs(host, show, numJobs); + } - @Transactional(readOnly = true) - public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { - return dispatcherDao.findUnderProcedJob(excludeJob, proc); - } + @Transactional(propagation = Propagation.REQUIRED) + public boolean increaseReservedMemory(ProcInterface p, long value) { + return procDao.increaseReservedMemory(p, value); + } - @Transactional(readOnly = true) - public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { - return dispatcherDao.higherPriorityJobExists(baseJob, proc); + @Override + public boolean clearVirtualProcAssignement(ProcInterface proc) { + try { + return procDao.clearVirtualProcAssignment(proc); + } catch (DataAccessException e) { + return false; } + } - @Transactional(readOnly = true) - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { - return dispatcherDao.findDispatchJobsForAllShows(host, numJobs); + @Transactional(propagation = Propagation.REQUIRED) + public boolean balanceReservedMemory(ProcInterface targetProc, long targetMem) { + boolean result = procDao.balanceUnderUtilizedProcs(targetProc, targetMem); + if (result) { + 
DispatchSupport.balanceSuccess.incrementAndGet(); + } else { + DispatchSupport.balanceFailed.incrementAndGet(); } + return result; + } - @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, int numJobs) { - return dispatcherDao.findDispatchJobs(host, numJobs); + @Transactional(propagation = Propagation.NEVER) + public void runFrame(VirtualProc proc, DispatchFrame frame) { + try { + rqdClient.launchFrame(prepareRqdRunFrame(proc, frame), proc); + dispatchedProcs.getAndIncrement(); + } catch (Exception e) { + throw new DispatcherException( + proc.getName() + " could not be booked on " + frame.getName() + ", " + e); } + } - @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { - return dispatcherDao.findDispatchJobs(host, g); - } + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void startFrameAndProc(VirtualProc proc, DispatchFrame frame) { + logger.trace("starting frame: " + frame); - @Override - @Transactional(readOnly = true) - public Set findLocalDispatchJobs(DispatchHost host) { - return dispatcherDao.findLocalDispatchJobs(host); - } + frameDao.updateFrameStarted(proc, frame); - @Override - @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, ShowInterface show, - int numJobs) { - return dispatcherDao.findDispatchJobs(host, show, numJobs); - } + reserveProc(proc, frame); + } - @Transactional(propagation = Propagation.REQUIRED) - public boolean increaseReservedMemory(ProcInterface p, long value) { - return procDao.increaseReservedMemory(p, value); - } + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isCueBookable(FacilityInterface f) { + return jobDao.cueHasPendingJobs(f); + } - @Override - public boolean clearVirtualProcAssignement(ProcInterface proc) { - try { - return procDao.clearVirtualProcAssignment(proc); - } catch (DataAccessException e) { - return false; - } - } + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobDispatchable(JobInterface job, boolean local) { - @Transactional(propagation = Propagation.REQUIRED) - public boolean balanceReservedMemory(ProcInterface targetProc, long targetMem) { - boolean result = procDao.balanceUnderUtilizedProcs(targetProc, targetMem); - if (result) { - DispatchSupport.balanceSuccess.incrementAndGet(); - } - else { - DispatchSupport.balanceFailed.incrementAndGet(); - } - return result; + if (!jobDao.hasPendingFrames(job)) { + return false; } - @Transactional(propagation = Propagation.NEVER) - public void runFrame(VirtualProc proc, DispatchFrame frame) { - try { - rqdClient.launchFrame(prepareRqdRunFrame(proc, frame), proc); - dispatchedProcs.getAndIncrement(); - } catch (Exception e) { - throw new DispatcherException(proc.getName() + - " could not be booked on " + frame.getName() + ", " + e); - } + if (!local && jobDao.isOverMaxCores(job)) { + return false; } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void startFrameAndProc(VirtualProc proc, DispatchFrame frame) { - logger.trace("starting frame: " + frame); - - frameDao.updateFrameStarted(proc, frame); - - reserveProc(proc, frame); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isCueBookable(FacilityInterface f) { - return jobDao.cueHasPendingJobs(f); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isJobDispatchable(JobInterface job, boolean local) { - - if 
(!jobDao.hasPendingFrames(job)) { - return false; - } - - if (!local && jobDao.isOverMaxCores(job)) { - return false; - } - - return true; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isJobBookable(JobInterface job) { - - if (!jobDao.hasPendingFrames(job)) { - return false; - } - - if (jobDao.isAtMaxCores(job)) { - return false; - } - - return true; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits) { - - if (!jobDao.hasPendingFrames(job)) { - return false; - } - - if (jobDao.isOverMaxCores(job, coreUnits)) { - return false; - } - - if (jobDao.isOverMaxGpus(job, gpuUnits)) { - return false; - } - - return true; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean hasPendingFrames(JobInterface job) { - - if (!jobDao.hasPendingFrames(job)) { - return false; - } - - return true; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean hasPendingFrames(LayerInterface layer) { - return layerDao.isLayerDispatchable(layer); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isShowOverBurst(VirtualProc proc) { - return subscriptionDao.isShowOverBurst((ShowInterface) proc, (AllocationInterface) proc, 0); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { - return subscriptionDao.isShowOverBurst(show, alloc, coreUnits); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { - return subscriptionDao.isShowAtOrOverBurst(show, alloc); - } - - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isShowOverSize(VirtualProc proc) { - return subscriptionDao.isShowOverSize(proc); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public boolean stopFrame(FrameInterface frame, FrameState state, - int exitStatus) { - logger.trace("stopping frame " + frame); - if (frameDao.updateFrameStopped(frame, state, exitStatus)) { - procDao.clearVirtualProcAssignment(frame); - return true; - } - - return false; - } + return true; + } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public boolean stopFrame(FrameInterface frame, FrameState state, - int exitStatus, long maxRss) { - logger.trace("stopping frame: " + frame); - if (frameDao.updateFrameStopped(frame, state, - exitStatus, maxRss)) { - // Update max rss up the chain. 
- layerDao.updateLayerMaxRSS(frame, maxRss, false); - jobDao.updateMaxRSS(frame, maxRss); - - procDao.clearVirtualProcAssignment(frame); - return true; - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobBookable(JobInterface job) { - return false; + if (!jobDao.hasPendingFrames(job)) { + return false; } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void clearFrame(DispatchFrame frame) { - logger.trace("clearing frame: " + frame); - frameDao.updateFrameCleared(frame); + if (jobDao.isAtMaxCores(job)) { + return false; } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public boolean updateFrameMemoryError(FrameInterface frame) { - return frameDao.updateFrameMemoryError(frame); - } + return true; + } - @Transactional(propagation = Propagation.SUPPORTS) - public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { - int threads = proc.coresReserved / 100; - if (threads < 1) { - threads = 1; - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits) { - int frameNumber = Integer.valueOf(frame.name.substring(0,frame.name.indexOf("-"))); - String zFrameNumber = String.format("%04d", frameNumber); - - FrameSet fs = new FrameSet(frame.range); - int startFrameIndex = fs.index(frameNumber); - String frameSpec = fs.getChunk(startFrameIndex, frame.chunkSize); - - FrameSet chunkFrameSet = new FrameSet(frameSpec); - int chunkEndFrame = chunkFrameSet.get(chunkFrameSet.size()-1); - - RunFrame.Builder builder = RunFrame.newBuilder() - .setShot(frame.shot) - .setShow(frame.show) - .setUserName(frame.owner) - .setLogDir(frame.logDir) - .setJobId(frame.jobId) - .setJobName(frame.jobName) - .setFrameId(frame.id) - .setFrameName(frame.name) - .setLayerId(frame.getLayerId()) - .setResourceId(proc.getProcId()) - .setNumCores(proc.coresReserved) - .setNumGpus(proc.gpusReserved) - .setStartTime(System.currentTimeMillis()) - .setIgnoreNimby(proc.isLocalDispatch) - .setOs(proc.os) - .setSoftMemoryLimit(frame.softMemoryLimit) - .setHardMemoryLimit(frame.hardMemoryLimit) - .putAllEnvironment(jobDao.getEnvironment(frame)) - .putAllEnvironment(layerDao.getLayerEnvironment(frame)) - .putEnvironment("CUE3", "1") - .putEnvironment("CUE_THREADS", String.valueOf(threads)) - .putEnvironment("CUE_MEMORY", String.valueOf(proc.memoryReserved)) - .putEnvironment("CUE_GPUS", String.valueOf(proc.gpusReserved)) - .putEnvironment("CUE_GPU_MEMORY", String.valueOf(proc.gpuMemoryReserved)) - .putEnvironment("CUE_LOG_PATH", frame.logDir) - .putEnvironment("CUE_RANGE", frame.range) - .putEnvironment("CUE_CHUNK", String.valueOf(frame.chunkSize)) - .putEnvironment("CUE_IFRAME", String.valueOf(frameNumber)) - .putEnvironment("CUE_LAYER", frame.layerName) - .putEnvironment("CUE_JOB", frame.jobName) - .putEnvironment("CUE_FRAME", frame.name) - .putEnvironment("CUE_SHOW", frame.show) - .putEnvironment("CUE_SHOT", frame.shot) - .putEnvironment("CUE_USER", frame.owner) - .putEnvironment("CUE_JOB_ID", frame.jobId) - .putEnvironment("CUE_LAYER_ID", frame.layerId) - .putEnvironment("CUE_FRAME_ID", frame.id) - .putEnvironment("CUE_THREADABLE", frame.threadable ? 
"1" : "0") - .setCommand( - frame.command - .replaceAll("#ZFRAME#", zFrameNumber) - .replaceAll("#IFRAME#", String.valueOf(frameNumber)) - .replaceAll("#FRAME_START#", String.valueOf(frameNumber)) - .replaceAll("#FRAME_END#", String.valueOf(chunkEndFrame)) - .replaceAll("#FRAME_CHUNK#", String.valueOf(frame.chunkSize)) - .replaceAll("#LAYER#", frame.layerName) - .replaceAll("#JOB#", frame.jobName) - .replaceAll("#FRAMESPEC#", frameSpec) - .replaceAll("#FRAME#", frame.name)); - /* The special command tokens above (#ZFRAME# and others) are provided to the user in cuesubmit. - * see: cuesubmit/cuesubmit/Constants.py - * Update the Constant.py file when updating tokens here, they will appear in the cuesubmit tooltip popup. - */ - - frame.uid.ifPresent(builder::setUid); - - return builder.build(); + if (!jobDao.hasPendingFrames(job)) { + return false; } - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void fixFrame(DispatchFrame frame) { - long numFixed = DispatchSupport.fixedFrames.incrementAndGet(); - - logger.trace("fixing frame #: " + numFixed + " ," + frame); - - VirtualProc proc = null; - try { - proc = procDao.findVirtualProc(frame); - } - catch (Exception e) { - // Can't even find the damn proc, which i'm - logger.info("attempted to fix a frame but the proc " + - "wasn't found!"); - return; - } - - if (frameDao.updateFrameFixed(proc, frame)){ - logger.info("the frame " + frame.getId() + " was fixed."); - } + if (jobDao.isOverMaxCores(job, coreUnits)) { + return false; } - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void updateUsageCounters(FrameInterface frame, int exitStatus) { - try { - ResourceUsage usage = frameDao.getResourceUsage(frame); - showDao.updateFrameCounters(frame, exitStatus); - jobDao.updateUsage(frame, usage, exitStatus); - layerDao.updateUsage(frame, usage, exitStatus); - } catch (Exception e) { - logger.info("Unable to find and update resource usage for " + - "frame, " + frame + " while updating frame with " + - "exit status " + exitStatus + "," + e); - } + if (jobDao.isOverMaxGpus(job, gpuUnits)) { + return false; } - private void reserveProc(VirtualProc proc, DispatchFrame frame) { + return true; + } - proc.jobId = frame.getJobId(); - proc.frameId = frame.getFrameId(); - proc.layerId = frame.getLayerId(); - proc.showId = frame.getShowId(); + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean hasPendingFrames(JobInterface job) { - if (proc.isNew()) { - logger.info("creating proc " + proc.getName() + " for " + - frame.getName()); - procDao.insertVirtualProc(proc); - } - else { - logger.info("updated proc " + proc.getName() + " for " + - frame.getName()); - procDao.updateVirtualProcAssignment(proc); - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public void unbookProc(VirtualProc proc) { - unbookProc(proc, "was unbooked"); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void unbookProc(VirtualProc proc, String reason) { - if (proc == null) { return; } - if (proc.isNew()) { return; } - proc.unbooked = true; - procDao.deleteVirtualProc(proc); - DispatchSupport.unbookedProcs.getAndIncrement(); - logger.info(proc + " " + reason); - - /* - * Remove the local dispatch record if it has gone inactive. 
- */ - if (proc.isLocalDispatch) { - try { - bookingManager.removeInactiveLocalHostAssignment( - bookingDao.getLocalJobAssignment(proc.getHostId(), proc.getJobId())); - } - catch (EmptyResultDataAccessException e) { - // Eat the exception. - } - } + if (!jobDao.hasPendingFrames(job)) { + return false; } - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void lostProc(VirtualProc proc, String reason, int exitStatus) { - long numCleared = clearedProcs.incrementAndGet(); - - unbookProc(proc,"proc " + proc.getName() + - " is #" + numCleared + " cleared: " + reason); - - if (proc.frameId != null) { - FrameInterface f = frameDao.getFrame(proc.frameId); - /* - * Set the checkpoint state to disabled before stopping the - * the frame because it will go to the checkpoint state. - * This is not desirable when we're clearing off processes - * that were lost due to a machine crash. - */ - frameDao.updateFrameCheckpointState(f, CheckpointState.DISABLED); - /* - * If the proc has a frame, stop the frame. Frames - * can only be stopped that are running. - */ - if (frameDao.updateFrameStopped(f, - FrameState.WAITING, exitStatus)) { - updateUsageCounters(proc, exitStatus); - } - /* - * If the frame is not running, check if frame is in dead state, - * frames that died due to host going down should be put back - * into WAITING status. - */ - else { - FrameDetail frameDetail = frameDao.getFrameDetail(f); - if ((frameDetail.state == FrameState.DEAD) && - (Dispatcher.EXIT_STATUS_DOWN_HOST == exitStatus)) { - if (frameDao.updateFrameHostDown(f)) { - logger.info("update frame " + f.getFrameId() + - "to WAITING status for down host"); - } - } - } - } else { - logger.info("Frame ID is NULL, not updating Frame state"); - } - } + return true; + } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, - long vsize, long maxVsize, long usedGpuMemory, - long maxUsedGpuMemory, long usedSwapMemory, - byte[] children) { - procDao.updateProcMemoryUsage(frame, rss, maxRss, vsize, maxVsize, - usedGpuMemory, maxUsedGpuMemory, usedSwapMemory, - children); - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean hasPendingFrames(LayerInterface layer) { + return layerDao.isLayerDispatchable(layer); + } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, - long lluTime) { + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowOverBurst(VirtualProc proc) { + return subscriptionDao.isShowOverBurst((ShowInterface) proc, (AllocationInterface) proc, 0); + } - try { - frameDao.updateFrameMemoryUsageAndLluTime(frame, maxRss, rss, lluTime); - } - catch (FrameReservationException ex) { - // Eat this, the frame was not in the correct state or - // was locked by another thread. The only reason it would - // be locked by another thread would be if the state is - // changing. 
- logger.warn("failed to update memory usage and LLU time for frame: " + frame); - } - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { + return subscriptionDao.isShowOverBurst(show, alloc, coreUnits); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { + return subscriptionDao.isShowAtOrOverBurst(show, alloc); + } - @Override - public void determineIdleCores(DispatchHost host, int load) { - int maxLoad = host.cores + ((host.cores / 100) * - Dispatcher.CORE_LOAD_THRESHOLD); + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowOverSize(VirtualProc proc) { + return subscriptionDao.isShowOverSize(proc); + } - int idleCores = maxLoad - load; - if (idleCores < host.idleCores) { - host.idleCores = idleCores; + @Override + @Transactional(propagation = Propagation.REQUIRED) + public boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus) { + logger.trace("stopping frame " + frame); + if (frameDao.updateFrameStopped(frame, state, exitStatus)) { + procDao.clearVirtualProcAssignment(frame); + return true; + } + + return false; + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus, long maxRss) { + logger.trace("stopping frame: " + frame); + if (frameDao.updateFrameStopped(frame, state, exitStatus, maxRss)) { + // Update max rss up the chain. + layerDao.updateLayerMaxRSS(frame, maxRss, false); + jobDao.updateMaxRSS(frame, maxRss); + + procDao.clearVirtualProcAssignment(frame); + return true; + } + + return false; + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void clearFrame(DispatchFrame frame) { + logger.trace("clearing frame: " + frame); + frameDao.updateFrameCleared(frame); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public boolean updateFrameMemoryError(FrameInterface frame) { + return frameDao.updateFrameMemoryError(frame); + } + + @Transactional(propagation = Propagation.SUPPORTS) + public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { + int threads = proc.coresReserved / 100; + if (threads < 1) { + threads = 1; + } + + int frameNumber = Integer.valueOf(frame.name.substring(0, frame.name.indexOf("-"))); + String zFrameNumber = String.format("%04d", frameNumber); + + FrameSet fs = new FrameSet(frame.range); + int startFrameIndex = fs.index(frameNumber); + String frameSpec = fs.getChunk(startFrameIndex, frame.chunkSize); + + FrameSet chunkFrameSet = new FrameSet(frameSpec); + int chunkEndFrame = chunkFrameSet.get(chunkFrameSet.size() - 1); + + RunFrame.Builder builder = RunFrame.newBuilder().setShot(frame.shot).setShow(frame.show) + .setUserName(frame.owner).setLogDir(frame.logDir).setJobId(frame.jobId) + .setJobName(frame.jobName).setFrameId(frame.id).setFrameName(frame.name) + .setLayerId(frame.getLayerId()).setResourceId(proc.getProcId()) + .setNumCores(proc.coresReserved).setNumGpus(proc.gpusReserved) + .setStartTime(System.currentTimeMillis()).setIgnoreNimby(proc.isLocalDispatch) + .setOs(proc.os).setSoftMemoryLimit(frame.softMemoryLimit) + .setHardMemoryLimit(frame.hardMemoryLimit).putAllEnvironment(jobDao.getEnvironment(frame)) + 
.putAllEnvironment(layerDao.getLayerEnvironment(frame)).putEnvironment("CUE3", "1") + .putEnvironment("CUE_THREADS", String.valueOf(threads)) + .putEnvironment("CUE_MEMORY", String.valueOf(proc.memoryReserved)) + .putEnvironment("CUE_GPUS", String.valueOf(proc.gpusReserved)) + .putEnvironment("CUE_GPU_MEMORY", String.valueOf(proc.gpuMemoryReserved)) + .putEnvironment("CUE_LOG_PATH", frame.logDir).putEnvironment("CUE_RANGE", frame.range) + .putEnvironment("CUE_CHUNK", String.valueOf(frame.chunkSize)) + .putEnvironment("CUE_IFRAME", String.valueOf(frameNumber)) + .putEnvironment("CUE_LAYER", frame.layerName).putEnvironment("CUE_JOB", frame.jobName) + .putEnvironment("CUE_FRAME", frame.name).putEnvironment("CUE_SHOW", frame.show) + .putEnvironment("CUE_SHOT", frame.shot).putEnvironment("CUE_USER", frame.owner) + .putEnvironment("CUE_JOB_ID", frame.jobId).putEnvironment("CUE_LAYER_ID", frame.layerId) + .putEnvironment("CUE_FRAME_ID", frame.id) + .putEnvironment("CUE_THREADABLE", frame.threadable ? "1" : "0") + .setCommand(frame.command.replaceAll("#ZFRAME#", zFrameNumber) + .replaceAll("#IFRAME#", String.valueOf(frameNumber)) + .replaceAll("#FRAME_START#", String.valueOf(frameNumber)) + .replaceAll("#FRAME_END#", String.valueOf(chunkEndFrame)) + .replaceAll("#FRAME_CHUNK#", String.valueOf(frame.chunkSize)) + .replaceAll("#LAYER#", frame.layerName).replaceAll("#JOB#", frame.jobName) + .replaceAll("#FRAMESPEC#", frameSpec).replaceAll("#FRAME#", frame.name)); + /* + * The special command tokens above (#ZFRAME# and others) are provided to the user in cuesubmit. + * see: cuesubmit/cuesubmit/Constants.py Update the Constant.py file when updating tokens here, + * they will appear in the cuesubmit tooltip popup. + */ + + frame.uid.ifPresent(builder::setUid); + + return builder.build(); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void fixFrame(DispatchFrame frame) { + long numFixed = DispatchSupport.fixedFrames.incrementAndGet(); + + logger.trace("fixing frame #: " + numFixed + " ," + frame); + + VirtualProc proc = null; + try { + proc = procDao.findVirtualProc(frame); + } catch (Exception e) { + // Can't even find the damn proc, which i'm + logger.info("attempted to fix a frame but the proc " + "wasn't found!"); + return; + } + + if (frameDao.updateFrameFixed(proc, frame)) { + logger.info("the frame " + frame.getId() + " was fixed."); + } + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void updateUsageCounters(FrameInterface frame, int exitStatus) { + try { + ResourceUsage usage = frameDao.getResourceUsage(frame); + showDao.updateFrameCounters(frame, exitStatus); + jobDao.updateUsage(frame, usage, exitStatus); + layerDao.updateUsage(frame, usage, exitStatus); + } catch (Exception e) { + logger.info("Unable to find and update resource usage for " + "frame, " + frame + + " while updating frame with " + "exit status " + exitStatus + "," + e); + } + } + + private void reserveProc(VirtualProc proc, DispatchFrame frame) { + + proc.jobId = frame.getJobId(); + proc.frameId = frame.getFrameId(); + proc.layerId = frame.getLayerId(); + proc.showId = frame.getShowId(); + + if (proc.isNew()) { + logger.info("creating proc " + proc.getName() + " for " + frame.getName()); + procDao.insertVirtualProc(proc); + } else { + logger.info("updated proc " + proc.getName() + " for " + frame.getName()); + procDao.updateVirtualProcAssignment(proc); + } + } + + @Transactional(propagation = Propagation.REQUIRED) + public void unbookProc(VirtualProc 
proc) { + unbookProc(proc, "was unbooked"); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void unbookProc(VirtualProc proc, String reason) { + if (proc == null) { + return; + } + if (proc.isNew()) { + return; + } + proc.unbooked = true; + procDao.deleteVirtualProc(proc); + DispatchSupport.unbookedProcs.getAndIncrement(); + logger.info(proc + " " + reason); + + /* + * Remove the local dispatch record if it has gone inactive. + */ + if (proc.isLocalDispatch) { + try { + bookingManager.removeInactiveLocalHostAssignment( + bookingDao.getLocalJobAssignment(proc.getHostId(), proc.getJobId())); + } catch (EmptyResultDataAccessException e) { + // Eat the exception. + } + } + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void lostProc(VirtualProc proc, String reason, int exitStatus) { + long numCleared = clearedProcs.incrementAndGet(); + + unbookProc(proc, "proc " + proc.getName() + " is #" + numCleared + " cleared: " + reason); + + if (proc.frameId != null) { + FrameInterface f = frameDao.getFrame(proc.frameId); + /* + * Set the checkpoint state to disabled before stopping the the frame because it will go to + * the checkpoint state. This is not desirable when we're clearing off processes that were + * lost due to a machine crash. + */ + frameDao.updateFrameCheckpointState(f, CheckpointState.DISABLED); + /* + * If the proc has a frame, stop the frame. Frames can only be stopped that are running. + */ + if (frameDao.updateFrameStopped(f, FrameState.WAITING, exitStatus)) { + updateUsageCounters(proc, exitStatus); + } + /* + * If the frame is not running, check if frame is in dead state, frames that died due to host + * going down should be put back into WAITING status. + */ + else { + FrameDetail frameDetail = frameDao.getFrameDetail(f); + if ((frameDetail.state == FrameState.DEAD) + && (Dispatcher.EXIT_STATUS_DOWN_HOST == exitStatus)) { + if (frameDao.updateFrameHostDown(f)) { + logger.info("update frame " + f.getFrameId() + "to WAITING status for down host"); + } } - } + } + } else { + logger.info("Frame ID is NULL, not updating Frame state"); + } + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, + long maxVsize, long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, + byte[] children) { + procDao.updateProcMemoryUsage(frame, rss, maxRss, vsize, maxVsize, usedGpuMemory, + maxUsedGpuMemory, usedSwapMemory, children); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, + long lluTime) { + + try { + frameDao.updateFrameMemoryUsageAndLluTime(frame, maxRss, rss, lluTime); + } catch (FrameReservationException ex) { + // Eat this, the frame was not in the correct state or + // was locked by another thread. The only reason it would + // be locked by another thread would be if the state is + // changing. 
+ logger.warn("failed to update memory usage and LLU time for frame: " + frame); + } + } + + @Override + public void determineIdleCores(DispatchHost host, int load) { + int maxLoad = host.cores + ((host.cores / 100) * Dispatcher.CORE_LOAD_THRESHOLD); + + int idleCores = maxLoad - load; + if (idleCores < host.idleCores) { + host.idleCores = idleCores; + } + } + + public DispatcherDao getDispatcherDao() { + return dispatcherDao; + } + + public void setDispatcherDao(DispatcherDao dispatcherDao) { + this.dispatcherDao = dispatcherDao; + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public SubscriptionDao getSubscriptionDao() { + return subscriptionDao; + } + + public void setSubscriptionDao(SubscriptionDao subscriptionDao) { + this.subscriptionDao = subscriptionDao; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public ShowDao getShowDao() { + return showDao; + } - public DispatcherDao getDispatcherDao() { - return dispatcherDao; - } + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } - public void setDispatcherDao(DispatcherDao dispatcherDao) { - this.dispatcherDao = dispatcherDao; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public FrameDao getFrameDao() { - return frameDao; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } + public BookingDao getBookingDao() { + return bookingDao; + } - public JobDao getJobDao() { - return jobDao; - } + public void setBookingDao(BookingDao bookingDao) { + this.bookingDao = bookingDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; 
- } - - public SubscriptionDao getSubscriptionDao() { - return subscriptionDao; - } - - public void setSubscriptionDao(SubscriptionDao subscriptionDao) { - this.subscriptionDao = subscriptionDao; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public BookingManager getBookingManager() { - return bookingManager; - } - - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } - - public BookingDao getBookingDao() { - return bookingDao; - } - - public void setBookingDao(BookingDao bookingDao) { - this.bookingDao = bookingDao; - } - - @Override - public void clearCache() { - dispatcherDao.clearCache(); - } + @Override + public void clearCache() { + dispatcherDao.clearCache(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java index f045a5ce5..4b9dc76c5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java @@ -1,22 +1,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; + import java.util.List; import com.imageworks.spcue.DispatchFrame; @@ -32,192 +29,188 @@ public interface Dispatcher { - // Maximum number of core points that can be assigned to a frame - public static final int CORE_POINTS_RESERVED_MAX = 2400; + // Maximum number of core points that can be assigned to a frame + public static final int CORE_POINTS_RESERVED_MAX = 2400; - // The default number of core points assigned to a frame, if no core - // point value is specified - public static final int CORE_POINTS_RESERVED_DEFAULT = 100; + // The default number of core points assigned to a frame, if no core + // point value is specified + public static final int CORE_POINTS_RESERVED_DEFAULT = 100; - // The minimum amount of core points you can assign to a frame. - public static final int CORE_POINTS_RESERVED_MIN = 10; + // The minimum amount of core points you can assign to a frame. 
+ public static final int CORE_POINTS_RESERVED_MIN = 10; - // The minimum amount of gpu points you can assign to a frame. - public static final int GPU_UNITS_RESERVED_DEFAULT = 0; - public static final int GPU_UNITS_RESERVED_MIN = 0; + // The minimum amount of gpu points you can assign to a frame. + public static final int GPU_UNITS_RESERVED_DEFAULT = 0; + public static final int GPU_UNITS_RESERVED_MIN = 0; - // Amount of load per core a host can have before the perceived - // number of idle cores is modified to reflect load conditions - // on the host. - public static final int CORE_LOAD_THRESHOLD = 5; - - // Amount of memory that has to be idle for the rest of the cores - // on the machine to be considered stranded. - public static final long MEM_STRANDED_THRESHHOLD = CueUtil.GB + CueUtil.MB512; - - // Determines the service default minimum memory per frame. - public static final long MEM_SERVICE_RESERVED_DEFAULT = CueUtil.GB4; - - // Determines the service default minimum gpu per frame. - public static final long MEM_SERVICE_GPU_RESERVED_DEFAULT = 0; - - // Return value for cleared frame - public static final int EXIT_STATUS_FRAME_CLEARED = 299; - - /* - * An orphan proc occurs when a proc is left with - * no frame assignment. - */ - public static final int EXIT_STATUS_FRAME_ORPHAN = 301; - - /* - * A failed kill occurs when a user tries to kill a frame - * and RQD throws an exception. - */ - public static final int EXIT_STATUS_FAILED_KILL = 302; - - // Return value for cleared frame - public static final int EXIT_STATUS_DOWN_HOST = 399; - - // Upgrade the memory on the layer by 1g and retry. - public static final int EXIT_STATUS_MEMORY_FAILURE = 33; - - // Upgrade the memory on the layer by 1g and retry. - public static final int DOCKER_EXIT_STATUS_MEMORY_FAILURE = 137; - - // max retry time - public static final int FRAME_TIME_NO_RETRY = 3600 * 8; - - // The maximum amount of virtual memory a frame can be using - // without being penalized for it. - public static final long VIRTUAL_MEM_THRESHHOLD = CueUtil.GB2; - - // How long to keep track of a frame kill request - public static final int FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES = 3; - - // A higher number gets more deep booking but less spread on the cue. - public static final int DEFAULT_MAX_FRAMES_PER_PASS = 4; - - // Disable RQD communication. - public static boolean testMode = false; - - // The time in seconds it takes for a proc or frame to orphan. - public static final int ORPHANED_SECONDS = 300; - - // The chance a frame will unbook itself to run a higher priority frame. - // 0 will never unbook, > 100 will always unbook. - public static final int UNBOOK_FREQUENCY = 101; - - // The default operating system assigned to host that don't report one. - public static final String OS_DEFAULT = "rhel40"; - - // The default minimum memory increase for when jobs fail due to not enough - // memory - public static final long MINIMUM_MEMORY_INCREASE = CueUtil.GB2; - - public static final double SOFT_MEMORY_MULTIPLIER = 1.1; - public static final double HARD_MEMORY_MULTIPLIER = 1.4; - - /** - * Dispatch a host to the facility. - * - * @param host - * @return A list of procs that were dispatched. - */ - List dispatchHostToAllShows(DispatchHost host); - - /** - * Dispatch a host to the facility. - * - * @param host - * @return A list of procs that were dispatched. - */ - List dispatchHost(DispatchHost host); - - /** - * Dispatch a host to the specified group and specify the maximum - * number of frames to dispatch from the host. 
- * - * @param host - * @param g - * @param numFrames - * @return - */ - List dispatchHost(DispatchHost host, GroupInterface g); - - /** - * Dispatch a host to the specified job. - * - * @param host - * @param job - * @return A list of procs that were dispatched. - * @throws DispatcherException if an error occurs. - */ - List dispatchHost(DispatchHost host, JobInterface job); - - /** - * Dispatch a host to the specified job. - * - * @param host - * @param job - * @return A list of procs that were dispatched. - * @throws DispatcherException if an error occurs. - */ - List dispatchHost(DispatchHost host, LayerInterface layer); - - /** - * Dispatch a host to the specified job. - * - * @param host - * @param job - * @return A list of procs that were dispatched. - * @throws DispatcherException if an error occurs. - */ - List dispatchHost(DispatchHost host, FrameInterface frame); - - /** - * Dispatch a proc to the specified job. - * - * @param proc - * @param job - * @throws DispatcherException if an error occurs. - */ - void dispatchProcToJob(VirtualProc proc, JobInterface job); - - /** - * Return true if the dispatcher should not talk to RQD - * - * @return - */ - boolean isTestMode(); - - /** - * Return true if the dispatcher should not talk to RQD - * - * @return - */ - void setTestMode(boolean enabled); - - /** - * Handles assigning a processor to a specified frame. - * - * @param frame - * @param proc - * - * @throws FrameReservationException if the frame cannot be reserved. - * @throws ResourceReservationFailureException if resources cannot - * be reserved. - * @throws RqdClientException if communication with RQD fails. - */ - void dispatch(DispatchFrame frame, VirtualProc proc); - - /** - * Dispatch the given host to the specified show. - * - * @param host - * @param show - * @return - */ - List dispatchHost(DispatchHost host, ShowInterface show); + // Amount of load per core a host can have before the perceived + // number of idle cores is modified to reflect load conditions + // on the host. + public static final int CORE_LOAD_THRESHOLD = 5; + + // Amount of memory that has to be idle for the rest of the cores + // on the machine to be considered stranded. + public static final long MEM_STRANDED_THRESHHOLD = CueUtil.GB + CueUtil.MB512; + + // Determines the service default minimum memory per frame. + public static final long MEM_SERVICE_RESERVED_DEFAULT = CueUtil.GB4; + + // Determines the service default minimum gpu per frame. + public static final long MEM_SERVICE_GPU_RESERVED_DEFAULT = 0; + + // Return value for cleared frame + public static final int EXIT_STATUS_FRAME_CLEARED = 299; + + /* + * An orphan proc occurs when a proc is left with no frame assignment. + */ + public static final int EXIT_STATUS_FRAME_ORPHAN = 301; + + /* + * A failed kill occurs when a user tries to kill a frame and RQD throws an exception. + */ + public static final int EXIT_STATUS_FAILED_KILL = 302; + + // Return value for cleared frame + public static final int EXIT_STATUS_DOWN_HOST = 399; + + // Upgrade the memory on the layer by 1g and retry. + public static final int EXIT_STATUS_MEMORY_FAILURE = 33; + + // Upgrade the memory on the layer by 1g and retry. + public static final int DOCKER_EXIT_STATUS_MEMORY_FAILURE = 137; + + // max retry time + public static final int FRAME_TIME_NO_RETRY = 3600 * 8; + + // The maximum amount of virtual memory a frame can be using + // without being penalized for it. 
+ public static final long VIRTUAL_MEM_THRESHHOLD = CueUtil.GB2; + + // How long to keep track of a frame kill request + public static final int FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES = 3; + + // A higher number gets more deep booking but less spread on the cue. + public static final int DEFAULT_MAX_FRAMES_PER_PASS = 4; + + // Disable RQD communication. + public static boolean testMode = false; + + // The time in seconds it takes for a proc or frame to orphan. + public static final int ORPHANED_SECONDS = 300; + + // The chance a frame will unbook itself to run a higher priority frame. + // 0 will never unbook, > 100 will always unbook. + public static final int UNBOOK_FREQUENCY = 101; + + // The default operating system assigned to host that don't report one. + public static final String OS_DEFAULT = "rhel40"; + + // The default minimum memory increase for when jobs fail due to not enough + // memory + public static final long MINIMUM_MEMORY_INCREASE = CueUtil.GB2; + + public static final double SOFT_MEMORY_MULTIPLIER = 1.1; + public static final double HARD_MEMORY_MULTIPLIER = 1.4; + + /** + * Dispatch a host to the facility. + * + * @param host + * @return A list of procs that were dispatched. + */ + List dispatchHostToAllShows(DispatchHost host); + + /** + * Dispatch a host to the facility. + * + * @param host + * @return A list of procs that were dispatched. + */ + List dispatchHost(DispatchHost host); + + /** + * Dispatch a host to the specified group and specify the maximum number of frames to dispatch + * from the host. + * + * @param host + * @param g + * @param numFrames + * @return + */ + List dispatchHost(DispatchHost host, GroupInterface g); + + /** + * Dispatch a host to the specified job. + * + * @param host + * @param job + * @return A list of procs that were dispatched. + * @throws DispatcherException if an error occurs. + */ + List dispatchHost(DispatchHost host, JobInterface job); + + /** + * Dispatch a host to the specified job. + * + * @param host + * @param job + * @return A list of procs that were dispatched. + * @throws DispatcherException if an error occurs. + */ + List dispatchHost(DispatchHost host, LayerInterface layer); + + /** + * Dispatch a host to the specified job. + * + * @param host + * @param job + * @return A list of procs that were dispatched. + * @throws DispatcherException if an error occurs. + */ + List dispatchHost(DispatchHost host, FrameInterface frame); + + /** + * Dispatch a proc to the specified job. + * + * @param proc + * @param job + * @throws DispatcherException if an error occurs. + */ + void dispatchProcToJob(VirtualProc proc, JobInterface job); + + /** + * Return true if the dispatcher should not talk to RQD + * + * @return + */ + boolean isTestMode(); + + /** + * Return true if the dispatcher should not talk to RQD + * + * @return + */ + void setTestMode(boolean enabled); + + /** + * Handles assigning a processor to a specified frame. + * + * @param frame + * @param proc + * + * @throws FrameReservationException if the frame cannot be reserved. + * @throws ResourceReservationFailureException if resources cannot be reserved. + * @throws RqdClientException if communication with RQD fails. + */ + void dispatch(DispatchFrame frame, VirtualProc proc); + + /** + * Dispatch the given host to the specified show. 
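For context on the reservation bounds defined above: OpenCue expresses core reservations in core points, 100 points per core, so the default of 100 corresponds to one core and the 2400 cap to 24 cores. The clamp helper below only illustrates what the MIN/DEFAULT/MAX constants mean; it is not the code cuebot uses to enforce them, and the class name and sample values are hypothetical.

// Illustrative sketch only, not part of the patch.
public class CorePointBounds {
    static final int MIN = 10;      // CORE_POINTS_RESERVED_MIN  (0.1 core)
    static final int DEFAULT = 100; // CORE_POINTS_RESERVED_DEFAULT (1 core)
    static final int MAX = 2400;    // CORE_POINTS_RESERVED_MAX  (24 cores)

    /** Clamp a requested reservation into the allowed range, defaulting to one core. */
    static int reservedCorePoints(Integer requested) {
        if (requested == null) {
            return DEFAULT; // no core point value specified on the layer
        }
        return Math.max(MIN, Math.min(MAX, requested));
    }

    public static void main(String[] args) {
        System.out.println(reservedCorePoints(null)); // 100  -> 1.0 core
        System.out.println(reservedCorePoints(250));  // 250  -> 2.5 cores
        System.out.println(reservedCorePoints(9000)); // 2400 -> capped at 24 cores
    }
}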
+ * + * @param host + * @param show + * @return + */ + List dispatchHost(DispatchHost host, ShowInterface show); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java index d98b5c9de..752b05f63 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,15 +20,14 @@ @SuppressWarnings("serial") public class DispatcherException extends SpcueRuntimeException { - public DispatcherException(String arg0) { - super(arg0); - // TODO Auto-generated constructor stub - } + public DispatcherException(String arg0) { + super(arg0); + // TODO Auto-generated constructor stub + } - public DispatcherException(String arg0, Throwable arg1) { - super(arg0, arg1); - // TODO Auto-generated constructor stub - } + public DispatcherException(String arg0, Throwable arg1) { + super(arg0, arg1); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java index 3e92158a0..88d2f72cc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import java.sql.Timestamp; @@ -62,709 +58,656 @@ import com.imageworks.spcue.grpc.service.ServiceOverride; /** - * The FrameCompleteHandler encapsulates all logic necessary for processing - * FrameComplete reports from RQD. + * The FrameCompleteHandler encapsulates all logic necessary for processing FrameComplete reports + * from RQD. */ public class FrameCompleteHandler { - private static final Logger logger = LogManager.getLogger(FrameCompleteHandler.class); - - private static final Random randomNumber = new Random(); - - private HostManager hostManager; - private JobManager jobManager; - private RedirectManager redirectManager; - private BookingManager bookingManager; - private DispatchQueue dispatchQueue; - private BookingQueue bookingQueue; - private Dispatcher dispatcher; - private Dispatcher localDispatcher; - private JobManagerSupport jobManagerSupport; - private DispatchSupport dispatchSupport; - private JmsMover jsmMover; - - private WhiteboardDao whiteboardDao; - private ServiceDao serviceDao; - private ShowDao showDao; - private Environment env; + private static final Logger logger = LogManager.getLogger(FrameCompleteHandler.class); + + private static final Random randomNumber = new Random(); + + private HostManager hostManager; + private JobManager jobManager; + private RedirectManager redirectManager; + private BookingManager bookingManager; + private DispatchQueue dispatchQueue; + private BookingQueue bookingQueue; + private Dispatcher dispatcher; + private Dispatcher localDispatcher; + private JobManagerSupport jobManagerSupport; + private DispatchSupport dispatchSupport; + private JmsMover jsmMover; + + private WhiteboardDao whiteboardDao; + private ServiceDao serviceDao; + private ShowDao showDao; + private Environment env; + + /* + * The last time a proc was unbooked for subscription or job balancing. Since there are so many + * more dispatch threads than booking threads, the dispatcher will over compensate and unbook too + * many cores if an imbalance occurs. Its better to keep cores running the same place for cache + * coherence. + */ + private final AtomicLong lastUnbook = new AtomicLong(0); + + /* + * The amount of time to wait before unbooking another proc for subscription or job balancing. + */ + private static final int UNBOOK_EXPIRE_MS = 2500; + + /** + * Boolean to toggle if this class is accepting data or not. 
+ */ + private boolean shutdown = false; + + /** + * Whether or not to satisfy dependents (*_ON_FRAME and *_ON_LAYER) only on Frame success + */ + private boolean satisfyDependOnlyOnFrameSuccess; + + public boolean getSatisfyDependOnlyOnFrameSuccess() { + return satisfyDependOnlyOnFrameSuccess; + } + + public void setSatisfyDependOnlyOnFrameSuccess(boolean satisfyDependOnlyOnFrameSuccess) { + this.satisfyDependOnlyOnFrameSuccess = satisfyDependOnlyOnFrameSuccess; + } + + @Autowired + public FrameCompleteHandler(Environment env) { + this.env = env; + satisfyDependOnlyOnFrameSuccess = + env.getProperty("depend.satisfy_only_on_frame_success", Boolean.class, true); + } + + /** + * Handle the given FrameCompleteReport from RQD. + * + * @param report + */ + public void handleFrameCompleteReport(final FrameCompleteReport report) { /* - * The last time a proc was unbooked for subscription or job balancing. - * Since there are so many more dispatch threads than booking threads, the - * dispatcher will over compensate and unbook too many cores if an imbalance - * occurs. Its better to keep cores running the same place for cache - * coherence. + * A boolean we're going to set to true if we can detect a corrupted data block in Oracle. */ - private final AtomicLong lastUnbook = new AtomicLong(0); - - /* - * The amount of time to wait before unbooking another proc for subscription - * or job balancing. - */ - private static final int UNBOOK_EXPIRE_MS = 2500; - - /** - * Boolean to toggle if this class is accepting data or not. - */ - private boolean shutdown = false; - - /** - * Whether or not to satisfy dependents (*_ON_FRAME and *_ON_LAYER) only on Frame success - */ - private boolean satisfyDependOnlyOnFrameSuccess; - - public boolean getSatisfyDependOnlyOnFrameSuccess() { - return satisfyDependOnlyOnFrameSuccess; + if (isShutdown()) { + throw new RqdRetryReportException( + "Error processing the frame complete report, " + "cuebot not accepting packets."); } - public void setSatisfyDependOnlyOnFrameSuccess(boolean satisfyDependOnlyOnFrameSuccess) { - this.satisfyDependOnlyOnFrameSuccess = satisfyDependOnlyOnFrameSuccess; - } - - @Autowired - public FrameCompleteHandler(Environment env) { - this.env = env; - satisfyDependOnlyOnFrameSuccess = env.getProperty( - "depend.satisfy_only_on_frame_success", Boolean.class, true); + try { + final VirtualProc proc = hostManager.getVirtualProc(report.getFrame().getResourceId()); + final DispatchJob job = jobManager.getDispatchJob(proc.getJobId()); + final LayerDetail layer = jobManager.getLayerDetail(report.getFrame().getLayerId()); + final FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + final DispatchFrame frame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + final FrameState newFrameState = determineFrameState(job, layer, frame, report); + final String key = proc.getJobId() + "_" + report.getFrame().getLayerId() + "_" + + report.getFrame().getFrameId(); + + // rqd is currently not able to report exit_signal=9 when a frame is killed by + // the OOM logic. The current solution sets exitStatus to + // Dispatcher.EXIT_STATUS_MEMORY_FAILURE before killing the frame, this enables + // auto-retrying frames affected by the logic when they report with a + // frameCompleteReport. This status retouch ensures a frame complete report is + // not able to override what has been set by the previous logic. 
+ int exitStatus = report.getExitStatus(); + if (frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE) { + exitStatus = frameDetail.exitStatus; + } + + if (dispatchSupport.stopFrame(frame, newFrameState, exitStatus, + report.getFrame().getMaxRss())) { + if (dispatcher.isTestMode()) { + // Database modifications on a threadpool cannot be captured by the test thread + handlePostFrameCompleteOperations(proc, report, job, frame, newFrameState, frameDetail); + } else { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + handlePostFrameCompleteOperations(proc, report, job, frame, newFrameState, + frameDetail); + } catch (Exception e) { + logger.warn("Exception during handlePostFrameCompleteOperations " + + "in handleFrameCompleteReport" + CueExceptionUtil.getStackTrace(e)); + } + } + }); + } + } else { + /* + * First check if we have a redirect. When a user retries a frame the proc is redirected + * back to the same job without checking any other properties. + */ + if (redirectManager.hasRedirect(proc)) { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + redirectManager.redirect(proc); + } catch (Exception e) { + logger.warn("Exception during redirect in handleFrameCompleteReport" + + CueExceptionUtil.getStackTrace(e)); + } + } + }); + } else { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + dispatchSupport.unbookProc(proc); + } catch (Exception e) { + logger.warn("Exception during unbookProc in handleFrameCompleteReport" + + CueExceptionUtil.getStackTrace(e)); + } + } + }); + } + } + } catch (EmptyResultDataAccessException e) { + /* + * Do not propagate this exception to RQD. This usually means the cue lost connectivity to the + * host and cleared out the record of the proc. If this is propagated back to RQD, RQD will + * keep retrying the operation forever. + */ + logger.info("failed to acquire data needed to " + "process completed frame: " + + report.getFrame().getFrameName() + " in job " + report.getFrame().getJobName() + "," + + e); + } catch (Exception e) { + + /* + * Everything else we kick back to RQD. + */ + logger.info("failed to acquire data needed " + "to process completed frame: " + + report.getFrame().getFrameName() + " in job " + report.getFrame().getJobName() + "," + + e); + + throw new RqdRetryReportException( + "error processing the frame complete " + "report, sending retry message to RQD " + e, e); } + } + + /** + * Handles frame complete operations other than the actual frame completing. + * + * Updates proc time usage counters. Drops dependencies. Sets jobs to the finished state. + * Optimizes layer memory requirements. Checks for other jobs that might need procs. Unbook proc + * if it needs to be moved. Check show subscription values. + * + * If the proc is not unbooked and moved, its re-dispatched onto the same job. + * + * @param proc + * @param report + * @param job + * @param frame + * @param newFrameState + */ + public void handlePostFrameCompleteOperations(VirtualProc proc, FrameCompleteReport report, + DispatchJob job, DispatchFrame frame, FrameState newFrameState, FrameDetail frameDetail) { + try { + + /* + * The default behavior is to keep the proc on the same job. 
+ */ + boolean unbookProc = proc.unbooked; + + dispatchSupport.updateUsageCounters(frame, report.getExitStatus()); + + boolean isLayerComplete = false; + + if (newFrameState.equals(FrameState.SUCCEEDED) + || (!satisfyDependOnlyOnFrameSuccess && newFrameState.equals(FrameState.EATEN))) { + jobManagerSupport.satisfyWhatDependsOn(frame); + isLayerComplete = jobManager.isLayerComplete(frame); + if (isLayerComplete) { + jobManagerSupport.satisfyWhatDependsOn((LayerInterface) frame); + } + } - /** - * Handle the given FrameCompleteReport from RQD. - * - * @param report - */ - public void handleFrameCompleteReport(final FrameCompleteReport report) { - + if (newFrameState.equals(FrameState.SUCCEEDED) && !isLayerComplete) { /* - * A boolean we're going to set to true if we can detect - * a corrupted data block in Oracle. + * If the layer meets some specific criteria then try to update the minimum memory and tags + * so it can run on a wider variety of cores, namely older hardware. */ - if (isShutdown()) { - throw new RqdRetryReportException( - "Error processing the frame complete report, " + - "cuebot not accepting packets."); + jobManager.optimizeLayer(frame, report.getFrame().getNumCores(), + report.getFrame().getMaxRss(), report.getRunTime()); + } + + /* + * The final frame can either be Succeeded or Eaten. If you only check if the frame is + * Succeeded before doing an isJobComplete check, then jobs that finish with the auto-eat flag + * enabled will not leave the cue. + */ + if (newFrameState.equals(FrameState.SUCCEEDED) || newFrameState.equals(FrameState.EATEN)) { + if (jobManager.isJobComplete(job)) { + job.state = JobState.FINISHED; + jobManagerSupport.queueShutdownJob(job, new Source("natural"), false); } - + } + + /* + * Some exit statuses indicate that a frame was killed by the application due to a memory + * issue and should be retried. In this case, disable the optimizer and raise the memory by + * what is specified in the show's service override, service or 2GB. + */ + if (report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE) { + long increase = CueUtil.GB2; + + // since there can be multiple services, just going for the + // first service (primary) + String serviceName = ""; try { - final VirtualProc proc = hostManager.getVirtualProc(report.getFrame().getResourceId()); - final DispatchJob job = jobManager.getDispatchJob(proc.getJobId()); - final LayerDetail layer = jobManager.getLayerDetail(report.getFrame().getLayerId()); - final FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - final DispatchFrame frame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - final FrameState newFrameState = determineFrameState(job, layer, frame, report); - final String key = proc.getJobId() + "_" + report.getFrame().getLayerId() + - "_" + report.getFrame().getFrameId(); - - // rqd is currently not able to report exit_signal=9 when a frame is killed by - // the OOM logic. The current solution sets exitStatus to - // Dispatcher.EXIT_STATUS_MEMORY_FAILURE before killing the frame, this enables - // auto-retrying frames affected by the logic when they report with a - // frameCompleteReport. This status retouch ensures a frame complete report is - // not able to override what has been set by the previous logic. 
- int exitStatus = report.getExitStatus(); - if (frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE) { - exitStatus = frameDetail.exitStatus; - } - - if (dispatchSupport.stopFrame(frame, newFrameState, exitStatus, - report.getFrame().getMaxRss())) { - if (dispatcher.isTestMode()) { - // Database modifications on a threadpool cannot be captured by the test thread - handlePostFrameCompleteOperations(proc, report, job, frame, - newFrameState, frameDetail); - } else { - dispatchQueue.execute(new KeyRunnable(key) { - @Override - public void run() { - try { - handlePostFrameCompleteOperations(proc, report, job, frame, - newFrameState, frameDetail); - } catch (Exception e) { - logger.warn("Exception during handlePostFrameCompleteOperations " + - "in handleFrameCompleteReport" + CueExceptionUtil.getStackTrace(e)); - } - } - }); - } - } - else { - /* - * First check if we have a redirect. When a user - * retries a frame the proc is redirected back - * to the same job without checking any other - * properties. - */ - if (redirectManager.hasRedirect(proc)) { - dispatchQueue.execute(new KeyRunnable(key) { - @Override - public void run() { - try { - redirectManager.redirect(proc); - } catch (Exception e) { - logger.warn("Exception during redirect in handleFrameCompleteReport" + - CueExceptionUtil.getStackTrace(e)); - } - } - }); - } - else { - dispatchQueue.execute(new KeyRunnable(key) { - @Override - public void run() { - try { - dispatchSupport.unbookProc(proc); - } catch (Exception e) { - logger.warn("Exception during unbookProc in handleFrameCompleteReport" + - CueExceptionUtil.getStackTrace(e)); - } - } - }); - } - } + serviceName = frame.services.split(",")[0]; + ServiceOverride showService = + whiteboardDao.getServiceOverride(showDao.findShowDetail(frame.show), serviceName); + // increase override is stored in Kb format so convert to Mb + // for easier reading. Note: Kb->Mb conversion uses 1024 blocks + increase = showService.getData().getMinMemoryIncrease(); + logger.info("Using " + serviceName + " service show " + "override for memory increase: " + + Math.floor(increase / 1024) + "Mb."); + } catch (NullPointerException e) { + logger.info("Frame has no associated services"); + } catch (EmptyResultDataAccessException e) { + logger.info(frame.show + " has no service override for " + serviceName + "."); + Service service = whiteboardDao.findService(serviceName); + increase = service.getMinMemoryIncrease(); + logger.info( + "Using service default for mem increase: " + Math.floor(increase / 1024) + "Mb."); } - catch (EmptyResultDataAccessException e) { - /* - * Do not propagate this exception to RQD. This - * usually means the cue lost connectivity to - * the host and cleared out the record of the proc. - * If this is propagated back to RQD, RQD will - * keep retrying the operation forever. - */ - logger.info("failed to acquire data needed to " + - "process completed frame: " + - report.getFrame().getFrameName() + " in job " + - report.getFrame().getJobName() + "," + e); + + unbookProc = true; + jobManager.enableMemoryOptimizer(frame, false); + jobManager.increaseLayerMemoryRequirement(frame, proc.memoryReserved + increase); + logger.info("Increased mem usage to: " + (proc.memoryReserved + increase)); + } + + /* + * Check for local dispatching. 
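The memory-increase branch above falls back from a show-level service override, to the service default, to the 2 GB CueUtil.GB2 constant, with values stored in kilobytes and divided by 1024 when logged in megabytes. The sketch below distills that fallback chain only; the DAO and exception handling are replaced by nullable parameters, and the class name and values are hypothetical.

// Illustrative sketch only, not part of the patch: the increase fallback chain,
// with values in kilobytes as in the code above.
public class MemoryIncreaseFallback {
    static final long GB2_KB = 2L * 1024 * 1024; // stand-in for CueUtil.GB2

    /** Picks the increase: show override, then service default, then 2 GB. */
    static long chooseIncreaseKb(Long showOverrideKb, Long serviceDefaultKb) {
        if (showOverrideKb != null) {
            return showOverrideKb;   // show-level ServiceOverride wins
        }
        if (serviceDefaultKb != null) {
            return serviceDefaultKb; // fall back to the service default
        }
        return GB2_KB;               // last resort: the 2 GB default
    }

    public static void main(String[] args) {
        long increase = chooseIncreaseKb(null, 4L * 1024 * 1024);
        // Kb -> Mb conversion for logging, as in the handler above.
        System.out.println(Math.floor(increase / 1024) + "Mb"); // 4096.0Mb
    }
}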
+ */ + + if (proc.isLocalDispatch) { + + if (!bookingManager.hasLocalHostAssignment(proc)) { + logger.info("the proc " + proc + " no longer has a local assignment."); + unbookProc = true; } - catch (Exception e) { - - /* - * Everything else we kick back to RQD. - */ - logger.info("failed to acquire data needed " + - "to process completed frame: " + - report.getFrame().getFrameName() + " in job " + - report.getFrame().getJobName() + "," + e); - - throw new RqdRetryReportException("error processing the frame complete " + - "report, sending retry message to RQD " + e, e); + } + + /* + * An exit status of FAILED_LAUNCH (256) indicates that the frame could not be launched due to + * some unforeseen unrecoverable error that is not checked when the launch command is given. + * The most common cause of this is when the job log directory is removed before the job is + * complete. + * + * Frames that return a 256 are put Frame back into WAITING status + */ + + else if (report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE) { + logger.info("unbooking " + proc + " frame status was failed frame launch."); + unbookProc = true; + } + + else if (report.getHost().getNimbyLocked()) { + + if (!proc.isLocalDispatch) { + logger.info("unbooking " + proc + " was NIMBY locked."); + unbookProc = true; } - } - /** - * Handles frame complete operations other than the actual frame - * completing. - * - * Updates proc time usage counters. - * Drops dependencies. - * Sets jobs to the finished state. - * Optimizes layer memory requirements. - * Checks for other jobs that might need procs. - * Unbook proc if it needs to be moved. - * Check show subscription values. - * - * If the proc is not unbooked and moved, its re-dispatched onto the same job. - * - * @param proc - * @param report - * @param job - * @param frame - * @param newFrameState - */ - public void handlePostFrameCompleteOperations(VirtualProc proc, - FrameCompleteReport report, DispatchJob job, DispatchFrame frame, - FrameState newFrameState, FrameDetail frameDetail) { - try { + /* Update the NIMBY locked state */ + hostManager.setHostLock(proc, LockState.NIMBY_LOCKED, new Source("NIMBY")); + } else if (report.getHost().getFreeMem() < CueUtil.MB512) { + /* + * Unbook anything on a proc that has only 512MB of free memory left. + */ + logger.info("unbooking" + proc + " was low was memory "); + unbookProc = true; + } else if (dispatchSupport.isShowOverBurst(proc)) { + /* + * Unbook the proc if the show is over burst. + */ + logger.info("show using proc " + proc + " is over burst."); + unbookProc = true; + } else if (!hostManager.isHostUp(proc)) { + + logger.info("the proc " + proc + " is not in the update state."); + unbookProc = true; + } else if (hostManager.isLocked(proc)) { + if (!proc.isLocalDispatch) { + logger.info("the proc " + proc + " is not in the open state."); + unbookProc = true; + } + } else if (redirectManager.hasRedirect(proc)) { - /* - * The default behavior is to keep the proc on the same job. - */ - boolean unbookProc = proc.unbooked; + logger.info("the proc " + proc + " has been redirected."); - dispatchSupport.updateUsageCounters(frame, report.getExitStatus()); + if (redirectManager.redirect(proc)) { + return; + } + } - boolean isLayerComplete = false; + /* + * If the proc is unbooked at this point, then unbook it and return. 
+ */ + if (unbookProc) { + dispatchSupport.unbookProc(proc); + return; + } - if (newFrameState.equals(FrameState.SUCCEEDED) - || (!satisfyDependOnlyOnFrameSuccess - && newFrameState.equals(FrameState.EATEN))) { - jobManagerSupport.satisfyWhatDependsOn(frame); - isLayerComplete = jobManager.isLayerComplete(frame); - if (isLayerComplete) { - jobManagerSupport.satisfyWhatDependsOn((LayerInterface) frame); - } - } + /* + * Check to see if the job the proc is currently assigned is still dispatchable. + */ + if (job.state.equals(JobState.FINISHED) + || !dispatchSupport.isJobDispatchable(job, proc.isLocalDispatch)) { - if (newFrameState.equals(FrameState.SUCCEEDED) && !isLayerComplete) { - /* - * If the layer meets some specific criteria then try to - * update the minimum memory and tags so it can run on a - * wider variety of cores, namely older hardware. - */ - jobManager.optimizeLayer(frame, report.getFrame().getNumCores(), - report.getFrame().getMaxRss(), report.getRunTime()); - } + logger.info("The " + job + " is no longer dispatchable."); + dispatchSupport.unbookProc(proc); - /* - * The final frame can either be Succeeded or Eaten. If you only - * check if the frame is Succeeded before doing an isJobComplete - * check, then jobs that finish with the auto-eat flag enabled will - * not leave the cue. - */ - if (newFrameState.equals(FrameState.SUCCEEDED) - || newFrameState.equals(FrameState.EATEN)) { - if (jobManager.isJobComplete(job)) { - job.state = JobState.FINISHED; - jobManagerSupport.queueShutdownJob(job, new Source( - "natural"), false); - } - } + /* + * Only rebook whole cores that have not been locally dispatched. Rebooking fractional can + * cause storms of booking requests that don't have a chance of finding a suitable frame to + * run. + */ + if (!proc.isLocalDispatch && proc.coresReserved >= 100 + && dispatchSupport.isCueBookable(job)) { - /* - * Some exit statuses indicate that a frame was killed by the - * application due to a memory issue and should be retried. In this - * case, disable the optimizer and raise the memory by what is - * specified in the show's service override, service or 2GB. - */ - if (report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE) { - long increase = CueUtil.GB2; - - // since there can be multiple services, just going for the - // first service (primary) - String serviceName = ""; - try { - serviceName = frame.services.split(",")[0]; - ServiceOverride showService = whiteboardDao.getServiceOverride( - showDao.findShowDetail(frame.show), serviceName); - // increase override is stored in Kb format so convert to Mb - // for easier reading. 
Note: Kb->Mb conversion uses 1024 blocks - increase = showService.getData().getMinMemoryIncrease(); - logger.info("Using " + serviceName + " service show " + - "override for memory increase: " + - Math.floor(increase / 1024) + "Mb."); - } - catch (NullPointerException e) { - logger.info("Frame has no associated services"); - } - catch (EmptyResultDataAccessException e) { - logger.info(frame.show + " has no service override for " + - serviceName + "."); - Service service = whiteboardDao.findService(serviceName); - increase = service.getMinMemoryIncrease(); - logger.info("Using service default for mem increase: " + - Math.floor(increase / 1024) + "Mb."); - } - - unbookProc = true; - jobManager.enableMemoryOptimizer(frame, false); - jobManager.increaseLayerMemoryRequirement(frame, - proc.memoryReserved + increase); - logger.info("Increased mem usage to: " + - (proc.memoryReserved + increase)); - } + bookingQueue.execute( + new DispatchBookHost(hostManager.getDispatchHost(proc.getHostId()), dispatcher, env)); + } - /* - * Check for local dispatching. - */ + if (job.state.equals(JobState.FINISHED)) { + jsmMover.send(job); + } + return; + } + + /* + * If the job is marked unbookable and its over its minimum value, we check to see if the proc + * can be moved to a job that hasn't reached its minimum proc yet. + * + * This will handle show balancing in the future. + */ + + if (!proc.isLocalDispatch && randomNumber.nextInt(100) <= Dispatcher.UNBOOK_FREQUENCY + && System.currentTimeMillis() > lastUnbook.get()) { + + // First make sure all jobs have their min cores + // Then check for higher priority jobs + // If not, rebook this job + if (job.autoUnbook && proc.coresReserved >= 100) { + if (jobManager.isOverMinCores(job)) { + try { - if (proc.isLocalDispatch) { + boolean unbook = dispatchSupport.findUnderProcedJob(job, proc); - if (!bookingManager.hasLocalHostAssignment(proc)) { - logger.info("the proc " + proc + " no longer has a local assignment."); - unbookProc = true; - } - } + if (!unbook) { + JobDetail jobDetail = jobManager.getJobDetail(job.id); + unbook = dispatchSupport.higherPriorityJobExists(jobDetail, proc); + } - /* - * An exit status of FAILED_LAUNCH (256) indicates that the frame could - * not be launched due to some unforeseen unrecoverable error that - * is not checked when the launch command is given. The most common - * cause of this is when the job log directory is removed before the - * job is complete. - * - * Frames that return a 256 are put Frame back into WAITING status - */ - - else if (report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE) { - logger.info("unbooking " + proc + " frame status was failed frame launch."); - unbookProc = true; - } + if (unbook) { - else if (report.getHost().getNimbyLocked()) { - - if (!proc.isLocalDispatch) { - logger.info("unbooking " + proc + " was NIMBY locked."); - unbookProc = true; - } - - /* Update the NIMBY locked state */ - hostManager.setHostLock(proc, LockState.NIMBY_LOCKED, - new Source("NIMBY")); - } else if (report.getHost().getFreeMem() < CueUtil.MB512) { - /* - * Unbook anything on a proc that has only 512MB of free memory - * left. - */ - logger.info("unbooking" + proc + " was low was memory "); - unbookProc = true; - } else if (dispatchSupport.isShowOverBurst(proc)) { - /* - * Unbook the proc if the show is over burst. 
- */ - logger.info("show using proc " + proc + " is over burst."); - unbookProc = true; - } else if (!hostManager.isHostUp(proc)) { - - logger.info("the proc " + proc + " is not in the update state."); - unbookProc = true; - } else if (hostManager.isLocked(proc)) { - if (!proc.isLocalDispatch) { - logger.info("the proc " + proc - + " is not in the open state."); - unbookProc = true; - } - } else if (redirectManager.hasRedirect(proc)) { - - logger.info("the proc " + proc + " has been redirected."); - - if (redirectManager.redirect(proc)) { - return; - } - } + // Set a new time to allow unbooking. + lastUnbook.set(System.currentTimeMillis() + UNBOOK_EXPIRE_MS); - /* - * If the proc is unbooked at this point, then unbook it and return. - */ - if (unbookProc) { + logger.info("Transfering " + proc); dispatchSupport.unbookProc(proc); - return; - } - /* - * Check to see if the job the proc is currently assigned is still - * dispatchable. - */ - if (job.state.equals(JobState.FINISHED) - || !dispatchSupport.isJobDispatchable(job, - proc.isLocalDispatch)) { + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); - logger.info("The " + job + " is no longer dispatchable."); - dispatchSupport.unbookProc(proc); - - /* - * Only rebook whole cores that have not been locally - * dispatched. Rebooking fractional can cause storms of booking - * requests that don't have a chance of finding a suitable frame - * to run. - */ - if (!proc.isLocalDispatch && proc.coresReserved >= 100 - && dispatchSupport.isCueBookable(job)) { - - bookingQueue.execute(new DispatchBookHost(hostManager - .getDispatchHost(proc.getHostId()), dispatcher, env)); - } - - if (job.state.equals(JobState.FINISHED)) { - jsmMover.send(job); - } + bookingQueue.execute(new DispatchBookHost(host, dispatcher, env)); return; + } + } catch (JobLookupException e) { + // wasn't able to find new job } - - /* - * If the job is marked unbookable and its over its minimum value, - * we check to see if the proc can be moved to a job that hasn't - * reached its minimum proc yet. - * - * This will handle show balancing in the future. - */ - - if (!proc.isLocalDispatch - && randomNumber.nextInt(100) <= Dispatcher.UNBOOK_FREQUENCY - && System.currentTimeMillis() > lastUnbook.get()) { - - // First make sure all jobs have their min cores - // Then check for higher priority jobs - // If not, rebook this job - if (job.autoUnbook && proc.coresReserved >= 100) { - if (jobManager.isOverMinCores(job)) { - try { - - boolean unbook = - dispatchSupport.findUnderProcedJob(job, proc); - - if (!unbook) { - JobDetail jobDetail = jobManager.getJobDetail(job.id); - unbook = dispatchSupport.higherPriorityJobExists(jobDetail, proc); - } - - if (unbook) { - - // Set a new time to allow unbooking. - lastUnbook.set(System.currentTimeMillis() - + UNBOOK_EXPIRE_MS); - - logger.info("Transfering " + proc); - dispatchSupport.unbookProc(proc); - - DispatchHost host = - hostManager.getDispatchHost(proc.getHostId()); - - bookingQueue.execute( - new DispatchBookHost(host, dispatcher, env)); - return; - } - } catch (JobLookupException e) { - // wasn't able to find new job - } - } - } - } - - if (newFrameState.equals(FrameState.WAITING) - || newFrameState.equals(FrameState.SUCCEEDED)) { - - /* - * Check for stranded cores on the host. 
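The lastUnbook bookkeeping above rate-limits how often a proc is transferred for subscription or job balancing, so the much larger pool of dispatch threads does not unbook more cores than the booking threads can rebalance. A minimal sketch of that throttle pattern follows; the class name is hypothetical and it reuses the same 2500 ms expiry.

// Illustrative sketch only, not part of the patch.
import java.util.concurrent.atomic.AtomicLong;

public class UnbookThrottle {
    // Matches UNBOOK_EXPIRE_MS above: wait this long before unbooking another proc.
    private static final int UNBOOK_EXPIRE_MS = 2500;
    private final AtomicLong lastUnbook = new AtomicLong(0);

    /** Returns true at most roughly once per expiry window; callers then transfer the proc. */
    boolean tryUnbookForBalancing() {
        long now = System.currentTimeMillis();
        if (now > lastUnbook.get()) {
            // Same read-then-set pattern as the handler; a rare duplicate unbook is tolerated.
            lastUnbook.set(now + UNBOOK_EXPIRE_MS);
            return true;
        }
        return false;
    }

    public static void main(String[] args) {
        UnbookThrottle throttle = new UnbookThrottle();
        System.out.println(throttle.tryUnbookForBalancing()); // true
        System.out.println(throttle.tryUnbookForBalancing()); // false within the 2.5s window
    }
}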
- */ - if (!proc.isLocalDispatch - && dispatchSupport.hasStrandedCores(proc) - && jobManager.isLayerThreadable(frame) - && dispatchSupport.isJobBookable(job)) { - - int stranded_cores = hostManager.getStrandedCoreUnits(proc); - if (stranded_cores >= 100) { - - DispatchHost host = - hostManager.getDispatchHost(proc.getHostId()); - dispatchSupport.strandCores(host, stranded_cores); - dispatchSupport.unbookProc(proc); - bookingQueue.execute(new DispatchBookHost(host, job, - dispatcher, env)); - return; - } - } - - // Book the next frame of this job on the same proc - if (proc.isLocalDispatch) { - dispatchQueue.execute(new DispatchNextFrame(job, proc, - localDispatcher)); - } else { - dispatchQueue.execute(new DispatchNextFrame(job, proc, - dispatcher)); - } - } else { - dispatchSupport.unbookProc(proc, "frame state was " - + newFrameState.toString()); - } - } catch (Exception e) { - /* - * At this point, the proc has no place to go. Since we've run into - * an error its best to just unbook it. You can't handle this with a - * roll back because the record existed before any transactions - * started. - */ - logger.warn("An error occured when procssing " - + "frame complete message, " - + CueExceptionUtil.getStackTrace(e)); - try { - dispatchSupport.unbookProc(proc, - "an error occured when procssing frame complete message."); - } catch (EmptyResultDataAccessException ee) { - logger.info("Failed to find proc to unbook after frame " - + "complete message " + CueExceptionUtil.getStackTrace(ee)); - } + } } - } + } - /** - * Determines the new FrameState for a frame based on values contained in - * the FrameCompleteReport - * - * If the frame is Waiting or Eaten, then it was manually set to that status - * before the frame was killed. In that case whatever the current state in - * the DB is the one we want to use. - * - * If the frame status is dead or the frame.exitStatus is a non-zero value, - * and the frame has been retried job.maxRetries times, then the frame is - * Dead. If the frame has an exit status of 256, that is a non-retry status, - * the frame is dead. - * - * Assuming the two previous checks are not true, then a non-zero exit - * status sets the frame back to Waiting, while a zero status sets the frame - * to Succeeded. - * - * @param job - * @param frame - * @param report - * @return - */ - public static final FrameState determineFrameState(DispatchJob job, LayerDetail layer, - DispatchFrame frame, FrameCompleteReport report) { + if (newFrameState.equals(FrameState.WAITING) || newFrameState.equals(FrameState.SUCCEEDED)) { - if (EnumSet.of(FrameState.WAITING, FrameState.EATEN).contains( - frame.state)) { - return frame.state; + /* + * Check for stranded cores on the host. + */ + if (!proc.isLocalDispatch && dispatchSupport.hasStrandedCores(proc) + && jobManager.isLayerThreadable(frame) && dispatchSupport.isJobBookable(job)) { + + int stranded_cores = hostManager.getStrandedCoreUnits(proc); + if (stranded_cores >= 100) { + + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + dispatchSupport.strandCores(host, stranded_cores); + dispatchSupport.unbookProc(proc); + bookingQueue.execute(new DispatchBookHost(host, job, dispatcher, env)); + return; + } } - // Checks for frames that have reached max retries. 
- else if (frame.state.equals(FrameState.DEAD)) { - if (job.autoEat) { - return FrameState.EATEN; - } else { - return FrameState.DEPEND; - } - } else if (report.getExitStatus() != 0) { - - long r = System.currentTimeMillis() / 1000; - long lastUpdate = (r - report.getFrame().getLluTime()) / 60; - - FrameState newState = FrameState.WAITING; - if (report.getExitStatus() == FrameExitStatus.SKIP_RETRY_VALUE - || (job.maxRetries != 0 && report.getExitSignal() == 119)) { - report = FrameCompleteReport.newBuilder(report).setExitStatus(FrameExitStatus.SKIP_RETRY_VALUE).build(); - newState = FrameState.WAITING; - // exemption code 256 - } else if ((report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE || - report.getExitSignal() == FrameExitStatus.FAILED_LAUNCH_VALUE) && - (frame.retries < job.maxRetries)) { - report = FrameCompleteReport.newBuilder(report).setExitStatus(report.getExitStatus()).build(); - newState = FrameState.WAITING; - } else if (job.autoEat) { - newState = FrameState.EATEN; - // ETC Time out and LLU timeout - } else if (layer.timeout_llu != 0 && report.getFrame().getLluTime() != 0 - && lastUpdate > (layer.timeout_llu -1)) { - newState = FrameState.DEAD; - } else if (layer.timeout != 0 && report.getRunTime() > layer.timeout * 60) { - newState = FrameState.DEAD; - } else if (report.getRunTime() > Dispatcher.FRAME_TIME_NO_RETRY) { - newState = FrameState.DEAD; - } else if (frame.retries >= job.maxRetries) { - if (!(report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE)) - newState = FrameState.DEAD; - } - return newState; + // Book the next frame of this job on the same proc + if (proc.isLocalDispatch) { + dispatchQueue.execute(new DispatchNextFrame(job, proc, localDispatcher)); } else { - return FrameState.SUCCEEDED; + dispatchQueue.execute(new DispatchNextFrame(job, proc, dispatcher)); } + } else { + dispatchSupport.unbookProc(proc, "frame state was " + newFrameState.toString()); + } + } catch (Exception e) { + /* + * At this point, the proc has no place to go. Since we've run into an error its best to just + * unbook it. You can't handle this with a roll back because the record existed before any + * transactions started. + */ + logger.warn("An error occured when procssing " + "frame complete message, " + + CueExceptionUtil.getStackTrace(e)); + try { + dispatchSupport.unbookProc(proc, "an error occured when procssing frame complete message."); + } catch (EmptyResultDataAccessException ee) { + logger.info("Failed to find proc to unbook after frame " + "complete message " + + CueExceptionUtil.getStackTrace(ee)); + } } - - public boolean isShutdown() { - return shutdown; + } + + /** + * Determines the new FrameState for a frame based on values contained in the FrameCompleteReport + * + * If the frame is Waiting or Eaten, then it was manually set to that status before the frame was + * killed. In that case whatever the current state in the DB is the one we want to use. + * + * If the frame status is dead or the frame.exitStatus is a non-zero value, and the frame has been + * retried job.maxRetries times, then the frame is Dead. If the frame has an exit status of 256, + * that is a non-retry status, the frame is dead. + * + * Assuming the two previous checks are not true, then a non-zero exit status sets the frame back + * to Waiting, while a zero status sets the frame to Succeeded. 
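One detail of the determineFrameState rules that is easy to miss is the last-log-update (LLU) timeout: the report's lluTime is an epoch value in seconds, the elapsed time is converted to minutes, and the frame is considered dead once that exceeds timeout_llu - 1 minutes. A small sketch of that arithmetic with hypothetical times; the class and method names are illustrative only.

// Illustrative sketch only, not part of the patch.
public class LluTimeoutCheck {
    static boolean lluTimedOut(long nowEpochSec, long lluTimeEpochSec, int timeoutLluMinutes) {
        if (timeoutLluMinutes == 0 || lluTimeEpochSec == 0) {
            return false; // a zero value disables the check, as in the code above
        }
        long minutesSinceLastUpdate = (nowEpochSec - lluTimeEpochSec) / 60;
        return minutesSinceLastUpdate > (timeoutLluMinutes - 1);
    }

    public static void main(String[] args) {
        long now = 1_700_000_000L;
        // Log last touched 31 minutes ago with a 30 minute LLU timeout -> frame goes DEAD.
        System.out.println(lluTimedOut(now, now - 31 * 60, 30)); // true
        // 29 minutes ago -> still within the window.
        System.out.println(lluTimedOut(now, now - 29 * 60, 30)); // false
    }
}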
+ * + * @param job + * @param frame + * @param report + * @return + */ + public static final FrameState determineFrameState(DispatchJob job, LayerDetail layer, + DispatchFrame frame, FrameCompleteReport report) { + + if (EnumSet.of(FrameState.WAITING, FrameState.EATEN).contains(frame.state)) { + return frame.state; } - - public synchronized void shutdown() { - logger.info("Shutting down FrameCompleteHandler."); - shutdown = true; + // Checks for frames that have reached max retries. + else if (frame.state.equals(FrameState.DEAD)) { + if (job.autoEat) { + return FrameState.EATEN; + } else { + return FrameState.DEPEND; + } + } else if (report.getExitStatus() != 0) { + + long r = System.currentTimeMillis() / 1000; + long lastUpdate = (r - report.getFrame().getLluTime()) / 60; + + FrameState newState = FrameState.WAITING; + if (report.getExitStatus() == FrameExitStatus.SKIP_RETRY_VALUE + || (job.maxRetries != 0 && report.getExitSignal() == 119)) { + report = FrameCompleteReport.newBuilder(report) + .setExitStatus(FrameExitStatus.SKIP_RETRY_VALUE).build(); + newState = FrameState.WAITING; + // exemption code 256 + } else if ((report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE + || report.getExitSignal() == FrameExitStatus.FAILED_LAUNCH_VALUE) + && (frame.retries < job.maxRetries)) { + report = + FrameCompleteReport.newBuilder(report).setExitStatus(report.getExitStatus()).build(); + newState = FrameState.WAITING; + } else if (job.autoEat) { + newState = FrameState.EATEN; + // ETC Time out and LLU timeout + } else if (layer.timeout_llu != 0 && report.getFrame().getLluTime() != 0 + && lastUpdate > (layer.timeout_llu - 1)) { + newState = FrameState.DEAD; + } else if (layer.timeout != 0 && report.getRunTime() > layer.timeout * 60) { + newState = FrameState.DEAD; + } else if (report.getRunTime() > Dispatcher.FRAME_TIME_NO_RETRY) { + newState = FrameState.DEAD; + } else if (frame.retries >= job.maxRetries) { + if (!(report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE)) + newState = FrameState.DEAD; + } + + return newState; + } else { + return FrameState.SUCCEEDED; } + } - public HostManager getHostManager() { - return hostManager; - } + public boolean isShutdown() { + return shutdown; + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + public synchronized void shutdown() { + logger.info("Shutting down FrameCompleteHandler."); + shutdown = true; + } - public JobManager getJobManager() { - return jobManager; - } + public HostManager getHostManager() { + return hostManager; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } - public RedirectManager getRedirectManager() { - return redirectManager; - } + public JobManager getJobManager() { + return jobManager; + } - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } - public DispatchQueue getDispatchQueue() { - return dispatchQueue; - } + public RedirectManager getRedirectManager() { + return redirectManager; + } - public void setDispatchQueue(DispatchQueue dispatchQueue) { - this.dispatchQueue = dispatchQueue; - } + public void setRedirectManager(RedirectManager 
redirectManager) { + this.redirectManager = redirectManager; + } - public BookingQueue getBookingQueue() { - return bookingQueue; - } + public DispatchQueue getDispatchQueue() { + return dispatchQueue; + } - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } + public void setDispatchQueue(DispatchQueue dispatchQueue) { + this.dispatchQueue = dispatchQueue; + } - public Dispatcher getDispatcher() { - return dispatcher; - } + public BookingQueue getBookingQueue() { + return bookingQueue; + } - public void setDispatcher(Dispatcher dispatcher) { - this.dispatcher = dispatcher; - } + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } + public Dispatcher getDispatcher() { + return dispatcher; + } - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } + public void setDispatcher(Dispatcher dispatcher) { + this.dispatcher = dispatcher; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public BookingManager getBookingManager() { - return bookingManager; - } + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } - public JmsMover getJmsMover() { - return jsmMover; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public void setJmsMover(JmsMover jsmMover) { - this.jsmMover = jsmMover; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - public WhiteboardDao getWhiteboardDao() { return whiteboardDao; } + public JmsMover getJmsMover() { + return jsmMover; + } - public void setWhiteboardDao(WhiteboardDao whiteboardDao) { - this.whiteboardDao = whiteboardDao; } + public void setJmsMover(JmsMover jsmMover) { + this.jsmMover = jsmMover; + } - public ServiceDao getServiceDao() { return serviceDao; } + public WhiteboardDao getWhiteboardDao() { + return whiteboardDao; + } - public void setServiceDao(ServiceDao serviceDao) { - this.serviceDao = serviceDao; } + public void setWhiteboardDao(WhiteboardDao whiteboardDao) { + this.whiteboardDao = whiteboardDao; + } - public ShowDao getShowDao() { return showDao; } + public ServiceDao getServiceDao() { + return serviceDao; + } - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; } + public void setServiceDao(ServiceDao serviceDao) { + this.serviceDao = serviceDao; + } -} + public ShowDao getShowDao() { + return showDao; + } + public void setShowDao(ShowDao showDao) { + 
this.showDao = showDao; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java index aa5e71048..038530499 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class FrameLookupException extends SpcueRuntimeException { - public FrameLookupException() { - // TODO Auto-generated constructor stub - } + public FrameLookupException() { + // TODO Auto-generated constructor stub + } - public FrameLookupException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public FrameLookupException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public FrameLookupException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public FrameLookupException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public FrameLookupException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public FrameLookupException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java index cd80e6578..989c4c435 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
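For readers skimming the reformatted exception classes: FrameLookupException above (and FrameReservationException just below) follow the usual unchecked-exception shape of SpcueRuntimeException subclasses, i.e. four pass-through constructors. The standalone sketch below mirrors that shape with hypothetical names (LookupFailedException, findFrameName) purely to show how such an exception is typically thrown; it is not taken from cuebot's actual call sites.

// Hedged sketch of the four-constructor unchecked-exception shape used above.
// All names here are hypothetical; only the constructor pattern matches the patch.
@SuppressWarnings("serial")
class LookupFailedException extends RuntimeException {
  public LookupFailedException() {}

  public LookupFailedException(String message) {
    super(message);
  }

  public LookupFailedException(String message, Throwable cause) {
    super(message, cause);
  }

  public LookupFailedException(Throwable cause) {
    super(cause);
  }
}

class LookupExample {
  // Wrap a low-level miss into the domain exception so callers handle a single type.
  static String findFrameName(java.util.Map<String, String> framesById, String frameId) {
    String name = framesById.get(frameId);
    if (name == null) {
      throw new LookupFailedException("No frame found for id " + frameId);
    }
    return name;
  }
}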
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class FrameReservationException extends SpcueRuntimeException { - public FrameReservationException() { - // TODO Auto-generated constructor stub - } + public FrameReservationException() { + // TODO Auto-generated constructor stub + } - public FrameReservationException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public FrameReservationException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public FrameReservationException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public FrameReservationException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public FrameReservationException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public FrameReservationException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java index 52f9f2728..0a6301044 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java @@ -16,223 +16,202 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; - -/*** - * A ThreadPoolExecutor with two additional features: - * - Handles repeated tasks by always keeping the latest version - * - With isHealthyOrShutdown, the threadpool will drain and clear resources when unhealthy +/** + * A ThreadPoolExecutor with two additional features: - Handles repeated tasks by always keeping the + * latest version - With isHealthyOrShutdown, the threadpool will drain and clear resources when + * unhealthy * */ public class HealthyThreadPool extends ThreadPoolExecutor { - // The service need s to be unhealthy for this period of time to report - private static final Logger logger = LogManager.getLogger("HEALTH"); - // Threshold to consider healthy or unhealthy - private final int healthThreshold; - private final int poolSize; - private final int minUnhealthyPeriodMin; - private final QueueRejectCounter rejectCounter = new QueueRejectCounter(); - private final Cache taskCache; - private final String name; - private Date lastCheck = new Date(); - private boolean wasHealthy = true; - protected final AtomicBoolean isShutdown = new AtomicBoolean(false); - private final int baseSleepTimeMillis; - - /** - * Start a 
thread pool - * @param name For logging purposes - * @param healthThreshold Percentage that should be available to consider healthy - * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy - * @param poolSize how many jobs can be queued - * @param threadsMinimum Minimum number of threads - * @param threadsMaximum Maximum number of threads to grow to - */ - public HealthyThreadPool(String name, - int healthThreshold, - int minUnhealthyPeriodMin, - int poolSize, - int threadsMinimum, - int threadsMaximum) { - this(name, healthThreshold, minUnhealthyPeriodMin, poolSize, - threadsMinimum, threadsMaximum, 0); - } - - /** - * Start a thread pool - * - * @param name For logging purposes - * @param healthThreshold Percentage that should be available to consider healthy - * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy - * @param poolSize how many jobs can be queued - * @param threadsMinimum Minimum number of threads - * @param threadsMaximum Maximum number of threads to grow to - * @param baseSleepTimeMillis Time a thread should sleep when the service is not under pressure - */ - public HealthyThreadPool(String name, - int healthThreshold, - int minUnhealthyPeriodMin, - int poolSize, - int threadsMinimum, - int threadsMaximum, - int baseSleepTimeMillis) { - super(threadsMinimum, threadsMaximum, 10, - TimeUnit.SECONDS, new LinkedBlockingQueue(poolSize)); - - logger.debug(name + ": Starting a new HealthyThreadPool"); - this.name = name; - this.healthThreshold = healthThreshold; - this.poolSize = poolSize; - this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; - this.baseSleepTimeMillis = baseSleepTimeMillis; - this.setRejectedExecutionHandler(rejectCounter); - - this.taskCache = CacheBuilder.newBuilder() - .expireAfterWrite(3, TimeUnit.MINUTES) - // Invalidate entries that got executed by the threadPool and lost their reference - .weakValues() - .concurrencyLevel(threadsMaximum) - .build(); - } - - public void execute(KeyRunnable r) { - if (isShutdown.get()) { - logger.info(name + ": Task ignored, queue on hold or shutdown"); - return; - } - if (taskCache.getIfPresent(r.getKey()) == null){ - taskCache.put(r.getKey(), r); - super.execute(r); - } + // The service need s to be unhealthy for this period of time to report + private static final Logger logger = LogManager.getLogger("HEALTH"); + // Threshold to consider healthy or unhealthy + private final int healthThreshold; + private final int poolSize; + private final int minUnhealthyPeriodMin; + private final QueueRejectCounter rejectCounter = new QueueRejectCounter(); + private final Cache taskCache; + private final String name; + private Date lastCheck = new Date(); + private boolean wasHealthy = true; + protected final AtomicBoolean isShutdown = new AtomicBoolean(false); + private final int baseSleepTimeMillis; + + /** + * Start a thread pool + * + * @param name For logging purposes + * @param healthThreshold Percentage that should be available to consider healthy + * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy + * @param poolSize how many jobs can be queued + * @param threadsMinimum Minimum number of threads + * @param threadsMaximum Maximum number of threads to grow to + */ + public HealthyThreadPool(String name, int healthThreshold, int minUnhealthyPeriodMin, + int poolSize, int threadsMinimum, int threadsMaximum) { + this(name, healthThreshold, minUnhealthyPeriodMin, poolSize, threadsMinimum, threadsMaximum, 0); + } + + /** + * Start a thread pool + * + * @param name For 
logging purposes + * @param healthThreshold Percentage that should be available to consider healthy + * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy + * @param poolSize how many jobs can be queued + * @param threadsMinimum Minimum number of threads + * @param threadsMaximum Maximum number of threads to grow to + * @param baseSleepTimeMillis Time a thread should sleep when the service is not under pressure + */ + public HealthyThreadPool(String name, int healthThreshold, int minUnhealthyPeriodMin, + int poolSize, int threadsMinimum, int threadsMaximum, int baseSleepTimeMillis) { + super(threadsMinimum, threadsMaximum, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue(poolSize)); + + logger.debug(name + ": Starting a new HealthyThreadPool"); + this.name = name; + this.healthThreshold = healthThreshold; + this.poolSize = poolSize; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.baseSleepTimeMillis = baseSleepTimeMillis; + this.setRejectedExecutionHandler(rejectCounter); + + this.taskCache = CacheBuilder.newBuilder().expireAfterWrite(3, TimeUnit.MINUTES) + // Invalidate entries that got executed by the threadPool and lost their + // reference + .weakValues().concurrencyLevel(threadsMaximum).build(); + } + + public void execute(KeyRunnable r) { + if (isShutdown.get()) { + logger.info(name + ": Task ignored, queue on hold or shutdown"); + return; } - - public long getRejectedTaskCount() { - return rejectCounter.getRejectCount(); + if (taskCache.getIfPresent(r.getKey()) == null) { + taskCache.put(r.getKey(), r); + super.execute(r); } - - /** - * Monitor if the queue is unhealthy for MIN_UNHEALTHY_PERIOD_MIN - * - * If unhealthy, the service will start the shutdown process and the - * caller is responsible for starting a new instance after the lock on - * awaitTermination is released. - */ - protected boolean shutdownUnhealthy() throws InterruptedException { - Date now = new Date(); - if (diffInMinutes(lastCheck, now) > minUnhealthyPeriodMin){ - this.wasHealthy = healthCheck(); - this.lastCheck = now; - } - - if(healthCheck() || wasHealthy) { - logger.debug(name + ": healthy (" + - "Remaining Capacity: " + this.getQueue().remainingCapacity() + - ", Running: " + this.getActiveCount() + - ", Total Executed: " + this.getCompletedTaskCount() + - ")"); - return true; - } - else if (isShutdown.get()) { - logger.warn("Queue shutting down"); - return false; - } - else { - logger.warn(name + ": unhealthy, starting shutdown)"); - threadDump(); - - isShutdown.set(true); - super.shutdownNow(); - logger.warn(name + ": Awaiting unhealthy queue termination"); - if (super.awaitTermination(1, TimeUnit.MINUTES)){ - logger.info(name + ": Terminated successfully"); - } - else { - logger.warn(name + ": Failed to terminate"); - } - // Threads will eventually terminate, proceed - taskCache.invalidateAll(); - return false; - } + } + + public long getRejectedTaskCount() { + return rejectCounter.getRejectCount(); + } + + /** + * Monitor if the queue is unhealthy for MIN_UNHEALTHY_PERIOD_MIN + * + * If unhealthy, the service will start the shutdown process and the caller is responsible for + * starting a new instance after the lock on awaitTermination is released. 
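The reformatted execute(KeyRunnable) above keeps only the latest copy of a repeated task by checking a Guava cache keyed on the task key before submitting. A minimal standalone sketch of that idiom follows; KeyedTask and DedupingPool are hypothetical stand-ins for KeyRunnable and HealthyThreadPool, and the pool and queue sizes are illustrative.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

// Simplified sketch of the "latest version only" submission used by HealthyThreadPool.execute.
class DedupingPool {
  interface KeyedTask extends Runnable {
    String getKey(); // hypothetical stand-in for KeyRunnable.getKey()
  }

  private final ThreadPoolExecutor pool =
      new ThreadPoolExecutor(2, 4, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(100));

  // Entries expire so a key can be resubmitted later; weak values let entries vanish
  // once the executed runnable is no longer referenced, matching the comment in the patch.
  private final Cache<String, KeyedTask> taskCache =
      CacheBuilder.newBuilder().expireAfterWrite(3, TimeUnit.MINUTES).weakValues().build();

  void submit(KeyedTask task) {
    if (taskCache.getIfPresent(task.getKey()) == null) {
      taskCache.put(task.getKey(), task);
      pool.execute(task);
    } // else: an equivalent task is already queued or recently ran, so skip it
  }
}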
+ */ + protected boolean shutdownUnhealthy() throws InterruptedException { + Date now = new Date(); + if (diffInMinutes(lastCheck, now) > minUnhealthyPeriodMin) { + this.wasHealthy = healthCheck(); + this.lastCheck = now; } - private void threadDump() { - ThreadMXBean mx = ManagementFactory.getThreadMXBean(); - for(ThreadInfo info : mx.dumpAllThreads(true, true)){ - logger.debug(info.toString()); - } + if (healthCheck() || wasHealthy) { + logger.debug(name + ": healthy (" + "Remaining Capacity: " + + this.getQueue().remainingCapacity() + ", Running: " + this.getActiveCount() + + ", Total Executed: " + this.getCompletedTaskCount() + ")"); + return true; + } else if (isShutdown.get()) { + logger.warn("Queue shutting down"); + return false; + } else { + logger.warn(name + ": unhealthy, starting shutdown)"); + threadDump(); + + isShutdown.set(true); + super.shutdownNow(); + logger.warn(name + ": Awaiting unhealthy queue termination"); + if (super.awaitTermination(1, TimeUnit.MINUTES)) { + logger.info(name + ": Terminated successfully"); + } else { + logger.warn(name + ": Failed to terminate"); + } + // Threads will eventually terminate, proceed + taskCache.invalidateAll(); + return false; } + } - private static long diffInMinutes(Date dateStart, Date dateEnd) { - return TimeUnit.MINUTES.convert( - dateEnd.getTime() - dateStart.getTime(), - TimeUnit.MILLISECONDS - ); + private void threadDump() { + ThreadMXBean mx = ManagementFactory.getThreadMXBean(); + for (ThreadInfo info : mx.dumpAllThreads(true, true)) { + logger.debug(info.toString()); } - - /** - * Lowers the sleep time as the queue grows. - * - * @return - */ - public int sleepTime() { - if (!isShutdown.get()) { - int sleep = (int) (baseSleepTimeMillis - (((this.getQueue().size () / - (float) this.poolSize) * baseSleepTimeMillis)) * 2); - if (sleep < 0) { - sleep = 0; - } - return sleep; - } else { - return 0; - } + } + + private static long diffInMinutes(Date dateStart, Date dateEnd) { + return TimeUnit.MINUTES.convert(dateEnd.getTime() - dateStart.getTime(), TimeUnit.MILLISECONDS); + } + + /** + * Lowers the sleep time as the queue grows. 
+ * + * @return + */ + public int sleepTime() { + if (!isShutdown.get()) { + int sleep = (int) (baseSleepTimeMillis + - (((this.getQueue().size() / (float) this.poolSize) * baseSleepTimeMillis)) * 2); + if (sleep < 0) { + sleep = 0; + } + return sleep; + } else { + return 0; } - - @Override - protected void beforeExecute(Thread t, Runnable r) { - super.beforeExecute(t, r); - if (isShutdown()) { - this.remove(r); - } else { - if (baseSleepTimeMillis > 0) { - try { - Thread.sleep(sleepTime()); - } catch (InterruptedException e) { - logger.info(name + ": booking queue was interrupted."); - } - } + } + + @Override + protected void beforeExecute(Thread t, Runnable r) { + super.beforeExecute(t, r); + if (isShutdown()) { + this.remove(r); + } else { + if (baseSleepTimeMillis > 0) { + try { + Thread.sleep(sleepTime()); + } catch (InterruptedException e) { + logger.info(name + ": booking queue was interrupted."); } + } } - - @Override - protected void afterExecute(Runnable r, Throwable t) { - super.afterExecute(r, t); - - // Invalidate cache to avoid having to wait for GC to mark processed entries collectible - KeyRunnable h = (KeyRunnable)r; - taskCache.invalidate(h.getKey()); - } - - protected boolean healthCheck() { - return (this.getQueue().remainingCapacity() > 0) || - (getRejectedTaskCount() < this.poolSize / healthThreshold); - } - - public void shutdown() { - if (!isShutdown.getAndSet(true)) { - logger.info("Shutting down thread pool " + name + ", currently " - + getActiveCount() + " active threads."); - final long startTime = System.currentTimeMillis(); - while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { - try { - if (System.currentTimeMillis() - startTime > 10000) { - throw new InterruptedException(name - + " thread pool failed to shutdown properly"); - } - Thread.sleep(250); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } - } + } + + @Override + protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); + + // Invalidate cache to avoid having to wait for GC to mark processed entries + // collectible + KeyRunnable h = (KeyRunnable) r; + taskCache.invalidate(h.getKey()); + } + + protected boolean healthCheck() { + return (this.getQueue().remainingCapacity() > 0) + || (getRejectedTaskCount() < this.poolSize / healthThreshold); + } + + public void shutdown() { + if (!isShutdown.getAndSet(true)) { + logger.info("Shutting down thread pool " + name + ", currently " + getActiveCount() + + " active threads."); + final long startTime = System.currentTimeMillis(); + while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { + try { + if (System.currentTimeMillis() - startTime > 10000) { + throw new InterruptedException(name + " thread pool failed to shutdown properly"); + } + Thread.sleep(250); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + break; } + } } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java index 98a9df495..4f0a55bc0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java @@ -1,17 +1,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
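To make the back-pressure arithmetic in HealthyThreadPool.sleepTime() above concrete, here is a standalone rendering of the same formula with illustrative numbers (the 400 ms base and 1000-slot queue are samples, not defaults from opencue.properties).

// Standalone rendering of the sleepTime() formula shown above:
// sleep = base - (queueSize / poolSize) * base * 2, floored at zero.
class SleepTimeExample {
  static int sleepTime(int baseSleepTimeMillis, int queueSize, int poolSize) {
    int sleep =
        (int) (baseSleepTimeMillis - ((queueSize / (float) poolSize) * baseSleepTimeMillis) * 2);
    return Math.max(sleep, 0);
  }

  public static void main(String[] args) {
    System.out.println(sleepTime(400, 0, 1000));   // 400 -> empty queue, full base sleep
    System.out.println(sleepTime(400, 250, 1000)); // 200 -> quarter full, sleep halved
    System.out.println(sleepTime(400, 500, 1000)); // 0   -> half full or more, no sleep
  }
}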
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.dispatcher; @@ -72,1415 +70,998 @@ public class HostReportHandler { - private static final Logger logger = LogManager.getLogger( - HostReportHandler.class - ); - - private BookingManager bookingManager; - private HostManager hostManager; - private BookingQueue bookingQueue; - private ThreadPoolExecutor reportQueue; - private ThreadPoolExecutor killQueue; - private DispatchSupport dispatchSupport; - private Dispatcher dispatcher; - private Dispatcher localDispatcher; - private RqdClient rqdClient; - private JobManager jobManager; - private JobDao jobDao; - private LayerDao layerDao; - - @Autowired - private Environment env; - - @Autowired - private CommentManager commentManager; - - @Autowired - private PrometheusMetricsCollector prometheusMetrics; - - // Comment constants - private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = - "Host set to REPAIR for not having enough storage " + - "space on the temporary directory (mcp)"; - private static final String CUEBOT_COMMENT_USER = "cuebot"; - private static final String WINDOWS_OS = "Windows"; - - // A cache to store kill requests and count the number of occurrences. - // The cache expires after write to avoid growing unbounded. If a request for a host-frame doesn't appear - // for a period of time, the entry will be removed. - Cache killRequestCounterCache = CacheBuilder.newBuilder() - .expireAfterWrite( - FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES, - TimeUnit.MINUTES - ) - .build(); - - /** - * Boolean to toggle if this class is accepting data or not. - */ - public boolean shutdown = false; - - /** - * Return true if this handler is not accepting packets anymore. 
- * @return - */ - public boolean isShutdown() { - return shutdown; + private static final Logger logger = LogManager.getLogger(HostReportHandler.class); + + private BookingManager bookingManager; + private HostManager hostManager; + private BookingQueue bookingQueue; + private ThreadPoolExecutor reportQueue; + private ThreadPoolExecutor killQueue; + private DispatchSupport dispatchSupport; + private Dispatcher dispatcher; + private Dispatcher localDispatcher; + private RqdClient rqdClient; + private JobManager jobManager; + private JobDao jobDao; + private LayerDao layerDao; + + @Autowired + private Environment env; + + @Autowired + private CommentManager commentManager; + + @Autowired + private PrometheusMetricsCollector prometheusMetrics; + + // Comment constants + private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = + "Host set to REPAIR for not having enough storage " + + "space on the temporary directory (mcp)"; + private static final String CUEBOT_COMMENT_USER = "cuebot"; + private static final String WINDOWS_OS = "Windows"; + + // A cache to store kill requests and count the number + // of occurrences. + // The cache expires after write to avoid growing unbounded. If a request for a + // host-frame doesn't appear + // for a period of time, the entry will be removed. + Cache killRequestCounterCache = CacheBuilder.newBuilder() + .expireAfterWrite(FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES, TimeUnit.MINUTES).build(); + + /** + * Boolean to toggle if this class is accepting data or not. + */ + public boolean shutdown = false; + + /** + * Return true if this handler is not accepting packets anymore. + * + * @return + */ + public boolean isShutdown() { + return shutdown; + } + + /** + * Shutdown this handler so it no longer accepts packets. Any call to queue a host report will + * throw an exception. + */ + public synchronized void shutdown() { + logger.info("Shutting down HostReportHandler."); + shutdown = true; + } + + /** + * Queues up the given boot report. + * + * @param report + */ + public void queueBootReport(BootReport report) { + if (isShutdown()) { + throw new RqdRetryReportException( + "Error processing host report. Cuebot not " + "accepting packets."); } - - /** - * Shutdown this handler so it no longer accepts packets. Any - * call to queue a host report will throw an exception. - */ - public synchronized void shutdown() { - logger.info("Shutting down HostReportHandler."); - shutdown = true; + reportQueue.execute(new DispatchHandleHostReport(report, this)); + } + + /** + * Queues up the given host report. + * + * @param report + */ + public void queueHostReport(HostReport report) { + if (isShutdown()) { + throw new RqdRetryReportException( + "Error processing host report. Cuebot not " + "accepting packets."); } - - /** - * Queues up the given boot report. - * - * @param report - */ - public void queueBootReport(BootReport report) { - if (isShutdown()) { - throw new RqdRetryReportException( - "Error processing host report. Cuebot not " + - "accepting packets." 
- ); + reportQueue.execute(new DispatchHandleHostReport(report, this)); + } + + public void handleHostReport(HostReport report, boolean isBoot) { + long startTime = System.currentTimeMillis(); + try { + long swapOut = 0; + if (report.getHost().getAttributesMap().containsKey("swapout")) { + swapOut = Integer.parseInt(report.getHost().getAttributesMap().get("swapout")); + if (swapOut > 0) + logger.info(report.getHost().getName() + " swapout: " + + report.getHost().getAttributesMap().get("swapout")); + } + + DispatchHost host; + RenderHost rhost = report.getHost(); + try { + host = hostManager.findDispatchHost(rhost.getName()); + hostManager.setHostStatistics(host, rhost.getTotalMem(), rhost.getFreeMem(), + rhost.getTotalSwap(), rhost.getFreeSwap(), rhost.getTotalMcp(), rhost.getFreeMcp(), + rhost.getTotalGpuMem(), rhost.getFreeGpuMem(), rhost.getLoad(), + new Timestamp(rhost.getBootTime() * 1000l), rhost.getAttributesMap().get("SP_OS")); + + // Both logics are conflicting, only change hardware state if + // there was no need for a tempDirStorage state change + if (!changeStateForTempDirStorage(host, report.getHost())) { + changeHardwareState(host, report.getHost().getState(), isBoot); } - reportQueue.execute(new DispatchHandleHostReport(report, this)); - } + changeNimbyState(host, report.getHost()); - /** - * Queues up the given host report. - * - * @param report - */ - public void queueHostReport(HostReport report) { - if (isShutdown()) { - throw new RqdRetryReportException( - "Error processing host report. Cuebot not " + - "accepting packets." - ); + /** + * This should only happen at boot time or it will fight with the dispatcher over row locks. + */ + if (isBoot) { + hostManager.setHostResources(host, report); } - reportQueue.execute(new DispatchHandleHostReport(report, this)); - } - public void handleHostReport(HostReport report, boolean isBoot) { - long startTime = System.currentTimeMillis(); - try { - long swapOut = 0; - if (report.getHost().getAttributesMap().containsKey("swapout")) { - swapOut = Integer.parseInt( - report.getHost().getAttributesMap().get("swapout") - ); - if (swapOut > 0) logger.info( - report.getHost().getName() + - " swapout: " + - report.getHost().getAttributesMap().get("swapout") - ); - } - - DispatchHost host; - RenderHost rhost = report.getHost(); - try { - host = hostManager.findDispatchHost(rhost.getName()); - hostManager.setHostStatistics( - host, - rhost.getTotalMem(), - rhost.getFreeMem(), - rhost.getTotalSwap(), - rhost.getFreeSwap(), - rhost.getTotalMcp(), - rhost.getFreeMcp(), - rhost.getTotalGpuMem(), - rhost.getFreeGpuMem(), - rhost.getLoad(), - new Timestamp(rhost.getBootTime() * 1000l), - rhost.getAttributesMap().get("SP_OS") - ); - - // Both logics are conflicting, only change hardware state if - // there was no need for a tempDirStorage state change - if (!changeStateForTempDirStorage(host, report.getHost())) { - changeHardwareState( - host, - report.getHost().getState(), - isBoot - ); - } - changeNimbyState(host, report.getHost()); - - /** - * This should only happen at boot time or it will - * fight with the dispatcher over row locks. - */ - if (isBoot) { - hostManager.setHostResources(host, report); - } - - dispatchSupport.determineIdleCores( - host, - report.getHost().getLoad() - ); - } catch (DataAccessException dae) { - logger.info( - "Unable to find host " + - rhost.getName() + - "," + - dae + - " , creating host." 
- ); - // TODO: Skip adding it if the host name is over 30 characters - - host = hostManager.createHost(report); - } catch (Exception e) { - logger.warn("Error processing HostReport, " + e); - return; - } - - /* - * Verify all the frames in the report are valid. - * Frames that are not valid are removed. - */ - List runningFrames = verifyRunningFrameInfo( - report - ); - - /* - * Updates memory usage for the proc, frames, - * jobs, and layers. And LLU time for the frames. - */ - updateMemoryUsageAndLluTime(runningFrames); - - /* - * kill frames that have over run. - */ - killTimedOutFrames(runningFrames, report.getHost().getName()); - - /* - * Prevent OOM (Out-Of-Memory) issues on the host and manage frame reserved memory - */ - handleMemoryUsage(host, report.getHost(), runningFrames); - - /* - * The checks are done in order of least CPU intensive to - * most CPU intensive, saving checks that hit the DB for last. - * - * These are done so we don't populate the booking queue with - * a bunch of hosts that can't be booked. - */ - String msg = null; - boolean hasLocalJob = bookingManager.hasLocalHostAssignment(host); - int coresToReserve = host.handleNegativeCoresRequirement( - Dispatcher.CORE_POINTS_RESERVED_MIN - ); - - if (hasLocalJob) { - List lcas = - bookingManager.getLocalHostAssignment(host); - for (LocalHostAssignment lca : lcas) { - bookingManager.removeInactiveLocalHostAssignment(lca); - } - } - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class - ); - - if ( - !isTempDirStorageEnough( - report.getHost().getTotalMcp(), - report.getHost().getFreeMcp(), - host.getOs() - ) - ) { - msg = String.format( - "%s doesn't have enough free space in the temporary directory (mcp), %dMB", - host.name, - (report.getHost().getFreeMcp() / 1024) - ); - } else if ( - coresToReserve <= 0 || - host.idleCores < Dispatcher.CORE_POINTS_RESERVED_MIN - ) { - msg = String.format( - "%s doesn't have enough idle cores, %d needs %d", - host.name, - host.idleCores, - Dispatcher.CORE_POINTS_RESERVED_MIN - ); - } else if (host.idleMemory < memReservedMin) { - msg = String.format( - "%s doesn't have enough idle memory, %d needs %d", - host.name, - host.idleMemory, - memReservedMin - ); - } else if (report.getHost().getFreeMem() < CueUtil.MB512) { - msg = String.format( - "%s doesn't have enough free system mem, %d needs %d", - host.name, - report.getHost().getFreeMem(), - memReservedMin - ); - } else if (!host.hardwareState.equals(HardwareState.UP)) { - msg = host + " is not in the Up state."; - } else if (host.lockState.equals(LockState.LOCKED)) { - msg = host + " is locked."; - } else if (report.getHost().getNimbyLocked()) { - if (!hasLocalJob) { - msg = host + " is NIMBY locked."; - } - } else if (!dispatchSupport.isCueBookable(host)) { - msg = "The cue has no pending jobs"; - } - - /* - * If a message was set, the host is not bookable. Log - * the message and move on. - */ - if (msg != null) { - logger.trace(msg); - } else { - // check again. The dangling local host assignment could be removed. - hasLocalJob = bookingManager.hasLocalHostAssignment(host); - - /* - * Check to see if a local job has been assigned. - */ - if (hasLocalJob) { - if (!bookingManager.hasResourceDeficit(host)) { - bookingQueue.execute( - new DispatchBookHostLocal(host, localDispatcher) - ); - } - return; - } - - /* - * Check if the host prefers a show. If it does , dispatch - * to that show first. 
- */ - if (hostManager.isPreferShow(host)) { - bookingQueue.execute( - new DispatchBookHost( - host, - hostManager.getPreferredShow(host), - dispatcher, - env - ) - ); - return; - } - - bookingQueue.execute( - new DispatchBookHost(host, dispatcher, env) - ); - } - } finally { - if ( - reportQueue.getQueue().size() > 0 || - System.currentTimeMillis() - startTime > 100 - ) { - /* - * Write a log if the host report takes a long time to process. - */ - CueUtil.logDuration( - startTime, - "host report " + - report.getHost().getName() + - " with " + - report.getFramesCount() + - " running frames, waiting: " + - reportQueue.getQueue().size() - ); - } + dispatchSupport.determineIdleCores(host, report.getHost().getLoad()); + } catch (DataAccessException dae) { + logger.info("Unable to find host " + rhost.getName() + "," + dae + " , creating host."); + // TODO: Skip adding it if the host name is over 30 characters + + host = hostManager.createHost(report); + } catch (Exception e) { + logger.warn("Error processing HostReport, " + e); + return; + } + + /* + * Verify all the frames in the report are valid. Frames that are not valid are removed. + */ + List runningFrames = verifyRunningFrameInfo(report); + + /* + * Updates memory usage for the proc, frames, jobs, and layers. And LLU time for the frames. + */ + updateMemoryUsageAndLluTime(runningFrames); + + /* + * kill frames that have over run. + */ + killTimedOutFrames(runningFrames, report.getHost().getName()); + + /* + * Prevent OOM (Out-Of-Memory) issues on the host and manage frame reserved memory + */ + handleMemoryUsage(host, report.getHost(), runningFrames); + + /* + * The checks are done in order of least CPU intensive to most CPU intensive, saving checks + * that hit the DB for last. + * + * These are done so we don't populate the booking queue with a bunch of hosts that can't be + * booked. + */ + String msg = null; + boolean hasLocalJob = bookingManager.hasLocalHostAssignment(host); + int coresToReserve = host.handleNegativeCoresRequirement(Dispatcher.CORE_POINTS_RESERVED_MIN); + + if (hasLocalJob) { + List lcas = bookingManager.getLocalHostAssignment(host); + for (LocalHostAssignment lca : lcas) { + bookingManager.removeInactiveLocalHostAssignment(lca); } - } - - /** - * Check if a reported temp storage size and availability is enough for running a job - * - * Use dispatcher.min_available_temp_storage_percentage (opencue.properties) to - * define what's the accepted threshold. Providing hostOs is necessary as this feature - * is currently not available on Windows hosts - * - * @param tempTotalStorage Total storage on the temp directory - * @param tempFreeStorage Free storage on the temp directory - * @param hostOs Reported operational systems - * @return - */ - private boolean isTempDirStorageEnough( - Long tempTotalStorage, - Long tempFreeStorage, - String[] hostOs - ) { - // The minimum amount of free space in the temporary directory to book a host - int minAvailableTempPercentage = env.getRequiredProperty( - "dispatcher.min_available_temp_storage_percentage", - Integer.class - ); - - return ( - minAvailableTempPercentage == -1 || - // It is safe to assume multiple OSs imply windows is not the base OS, - // threfore Windows will always report a single hostOs - (hostOs.length == 1 && hostOs[0].equalsIgnoreCase(WINDOWS_OS)) || - (((tempFreeStorage * 100.0) / tempTotalStorage) >= - minAvailableTempPercentage) - ); - } - - /** - * Update the hardware state property. 
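The bookability logic above is deliberately ordered from the cheapest checks to the most expensive, so the database-backed isCueBookable test only runs when everything else has passed, and the first failing check short-circuits with a message. A generic sketch of that guard-ordering pattern follows; all names and numbers in it are hypothetical, not cuebot's.

import java.util.List;
import java.util.function.Supplier;

// Generic sketch of the "cheapest check first, return the first failure" pattern used when
// deciding whether a host is bookable. Suppliers are evaluated lazily, so the expensive,
// database-backed check at the end of the list only runs if all earlier checks pass.
class BookabilityCheckExample {
  static String firstFailure(List<Supplier<String>> orderedChecks) {
    for (Supplier<String> check : orderedChecks) {
      String msg = check.get();
      if (msg != null) {
        return msg; // host is not bookable, log and move on
      }
    }
    return null; // all checks passed, host can be queued for booking
  }

  public static void main(String[] args) {
    int idleCores = 0; // illustrative values only
    long idleMemoryKb = 8L * 1024 * 1024;
    List<Supplier<String>> checks = List.of(
        () -> idleCores <= 0 ? "host doesn't have enough idle cores" : null,
        () -> idleMemoryKb < 4L * 1024 * 1024 ? "host doesn't have enough idle memory" : null,
        () -> expensiveDatabaseCheck() ? null : "the cue has no pending jobs");
    String msg = firstFailure(checks);
    System.out.println(msg == null ? "bookable" : msg);
  }

  static boolean expensiveDatabaseCheck() {
    return true; // stands in for the DB-backed dispatchSupport.isCueBookable(host)
  }
}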
- * - * If a host pings in with a different hardware state than what - * is currently in the DB, the state is updated. If the hardware - * state is Rebooting or RebootWhenIdle, then state can only be - * updated with a boot report. If the state is Repair, then state is - * never updated via RQD. - * - * @param host - * @param reportState - * @param isBoot - */ - private void changeHardwareState( - DispatchHost host, - HardwareState reportState, - boolean isBoot - ) { - // If the states are the same there is no reason to do this update. - if (host.hardwareState.equals(reportState)) { - return; + } + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + + if (!isTempDirStorageEnough(report.getHost().getTotalMcp(), report.getHost().getFreeMcp(), + host.getOs())) { + msg = String.format( + "%s doesn't have enough free space in the temporary directory (mcp), %dMB", host.name, + (report.getHost().getFreeMcp() / 1024)); + } else if (coresToReserve <= 0 || host.idleCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { + msg = String.format("%s doesn't have enough idle cores, %d needs %d", host.name, + host.idleCores, Dispatcher.CORE_POINTS_RESERVED_MIN); + } else if (host.idleMemory < memReservedMin) { + msg = String.format("%s doesn't have enough idle memory, %d needs %d", host.name, + host.idleMemory, memReservedMin); + } else if (report.getHost().getFreeMem() < CueUtil.MB512) { + msg = String.format("%s doesn't have enough free system mem, %d needs %d", host.name, + report.getHost().getFreeMem(), memReservedMin); + } else if (!host.hardwareState.equals(HardwareState.UP)) { + msg = host + " is not in the Up state."; + } else if (host.lockState.equals(LockState.LOCKED)) { + msg = host + " is locked."; + } else if (report.getHost().getNimbyLocked()) { + if (!hasLocalJob) { + msg = host + " is NIMBY locked."; } - - switch (host.hardwareState) { - case DOWN: - hostManager.setHostState(host, HardwareState.UP); - host.hardwareState = HardwareState.UP; - break; - case REBOOTING: - case REBOOT_WHEN_IDLE: - // Rebooting hosts only change to UP when processing a boot report - if (isBoot) { - hostManager.setHostState(host, HardwareState.UP); - host.hardwareState = HardwareState.UP; - } - break; - case REPAIR: - // Do not change the state of the host if its in a repair state. - break; - default: - hostManager.setHostState(host, reportState); - host.hardwareState = reportState; - break; + } else if (!dispatchSupport.isCueBookable(host)) { + msg = "The cue has no pending jobs"; + } + + /* + * If a message was set, the host is not bookable. Log the message and move on. + */ + if (msg != null) { + logger.trace(msg); + } else { + // check again. The dangling local host assignment could be removed. + hasLocalJob = bookingManager.hasLocalHostAssignment(host); + + /* + * Check to see if a local job has been assigned. + */ + if (hasLocalJob) { + if (!bookingManager.hasResourceDeficit(host)) { + bookingQueue.execute(new DispatchBookHostLocal(host, localDispatcher)); + } + return; } - } - /** - * Prevent cue frames from booking on hosts with full temporary directories. - * - * Change host state to REPAIR or UP according to the amount of free space - * in the temporary directory: - * - Set the host state to REPAIR, when the amount of free space in the - * temporary directory is less than the minimum required. 
- * - Set the host state to UP, when the amount of free space in the temporary directory - * is greater or equal to the minimum required and the host has a comment with - * subject: SUBJECT_COMMENT_FULL_TEMP_DIR - * - * @param host - * @param reportHost - * @return - */ - private boolean changeStateForTempDirStorage( - DispatchHost host, - RenderHost reportHost - ) { - // The minimum amount of free space in the temporary directory to book a host - int minAvailableTempPercentage = env.getRequiredProperty( - "dispatcher.min_available_temp_storage_percentage", - Integer.class - ); - - // Prevent cue frames from booking on hosts with full temporary directories - boolean hasEnoughTempStorage = isTempDirStorageEnough( - reportHost.getTotalMcp(), - reportHost.getFreeMcp(), - host.getOs() - ); - if (!hasEnoughTempStorage && host.hardwareState == HardwareState.UP) { - // Insert a comment indicating that the Host status = Repair with reason = Full temporary directory - CommentDetail c = new CommentDetail(); - c.subject = SUBJECT_COMMENT_FULL_TEMP_DIR; - c.user = CUEBOT_COMMENT_USER; - c.timestamp = null; - long requiredTempMb = (long) (((minAvailableTempPercentage / - 100.0) * - reportHost.getTotalMcp()) / - 1024); - c.message = - "Host " + - host.getName() + - " marked as REPAIR. The current amount of free space in the " + - "temporary directory (mcp) is " + - (reportHost.getFreeMcp() / 1024) + - "MB. It must have at least " + - ((requiredTempMb)) + - "MB of free space in temporary directory"; - commentManager.addComment(host, c); - - // Set the host state to REPAIR - hostManager.setHostState(host, HardwareState.REPAIR); - host.hardwareState = HardwareState.REPAIR; - - return true; - } else if ( - hasEnoughTempStorage && host.hardwareState == HardwareState.REPAIR - ) { - // Check if the host with REPAIR status has comments with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and - // user=CUEBOT_COMMENT_USER and delete the comments, if they exist - boolean commentsDeleted = - commentManager.deleteCommentByHostUserAndSubject( - host, - CUEBOT_COMMENT_USER, - SUBJECT_COMMENT_FULL_TEMP_DIR - ); - - if (commentsDeleted) { - // Set the host state to UP - hostManager.setHostState(host, HardwareState.UP); - host.hardwareState = HardwareState.UP; - return true; - } + /* + * Check if the host prefers a show. If it does , dispatch to that show first. + */ + if (hostManager.isPreferShow(host)) { + bookingQueue.execute( + new DispatchBookHost(host, hostManager.getPreferredShow(host), dispatcher, env)); + return; } - return false; + + bookingQueue.execute(new DispatchBookHost(host, dispatcher, env)); + } + } finally { + if (reportQueue.getQueue().size() > 0 || System.currentTimeMillis() - startTime > 100) { + /* + * Write a log if the host report takes a long time to process. + */ + CueUtil.logDuration(startTime, + "host report " + report.getHost().getName() + " with " + report.getFramesCount() + + " running frames, waiting: " + reportQueue.getQueue().size()); + } + } + } + + /** + * Check if a reported temp storage size and availability is enough for running a job + * + * Use dispatcher.min_available_temp_storage_percentage (opencue.properties) to define what's the + * accepted threshold. 
Providing hostOs is necessary as this feature is currently not available on + * Windows hosts + * + * @param tempTotalStorage Total storage on the temp directory + * @param tempFreeStorage Free storage on the temp directory + * @param hostOs Reported operational systems + * @return + */ + private boolean isTempDirStorageEnough(Long tempTotalStorage, Long tempFreeStorage, + String[] hostOs) { + // The minimum amount of free space in the temporary directory to book a host + int minAvailableTempPercentage = + env.getRequiredProperty("dispatcher.min_available_temp_storage_percentage", Integer.class); + + return (minAvailableTempPercentage == -1 || + // It is safe to assume multiple OSs imply windows is not the base OS, + // threfore Windows will always report a single hostOs + (hostOs.length == 1 && hostOs[0].equalsIgnoreCase(WINDOWS_OS)) + || (((tempFreeStorage * 100.0) / tempTotalStorage) >= minAvailableTempPercentage)); + } + + /** + * Update the hardware state property. + * + * If a host pings in with a different hardware state than what is currently in the DB, the state + * is updated. If the hardware state is Rebooting or RebootWhenIdle, then state can only be + * updated with a boot report. If the state is Repair, then state is never updated via RQD. + * + * @param host + * @param reportState + * @param isBoot + */ + private void changeHardwareState(DispatchHost host, HardwareState reportState, boolean isBoot) { + // If the states are the same there is no reason to do this update. + if (host.hardwareState.equals(reportState)) { + return; } - /** - * Changes the NIMBY lock state. If the DB indicates a NIMBY lock - * but RQD does not, then the host is unlocked. If the DB indicates - * the host is not locked but RQD indicates it is, the host is locked. - * - * @param host - * @param rh - */ - private void changeNimbyState(DispatchHost host, RenderHost rh) { - if (rh.getNimbyLocked()) { - if (host.lockState.equals(LockState.OPEN)) { - host.lockState = LockState.NIMBY_LOCKED; - hostManager.setHostLock( - host, - LockState.NIMBY_LOCKED, - new Source("NIMBY") - ); - } - } else { - if (host.lockState.equals(LockState.NIMBY_LOCKED)) { - host.lockState = LockState.OPEN; - hostManager.setHostLock( - host, - LockState.OPEN, - new Source("NIMBY") - ); - } + switch (host.hardwareState) { + case DOWN: + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + break; + case REBOOTING: + case REBOOT_WHEN_IDLE: + // Rebooting hosts only change to UP when processing a boot report + if (isBoot) { + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; } + break; + case REPAIR: + // Do not change the state of the host if its in a repair state. + break; + default: + hostManager.setHostState(host, reportState); + host.hardwareState = reportState; + break; + } + } + + /** + * Prevent cue frames from booking on hosts with full temporary directories. + * + * Change host state to REPAIR or UP according to the amount of free space in the temporary + * directory: - Set the host state to REPAIR, when the amount of free space in the temporary + * directory is less than the minimum required. 
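A standalone rendering of the isTempDirStorageEnough() rule above, with sample numbers showing where the threshold bites; the 20% value is only an illustration of dispatcher.min_available_temp_storage_percentage, and the kB unit follows the /1024-to-MB conversion used elsewhere in this class.

// Standalone rendering of the temp-dir storage rule shown above. A host passes when the check
// is disabled (-1), when it reports a single Windows OS, or when free space clears the threshold.
class TempDirCheckExample {
  static boolean isTempDirStorageEnough(long totalKb, long freeKb, String[] hostOs, int minPct) {
    return minPct == -1
        || (hostOs.length == 1 && hostOs[0].equalsIgnoreCase("Windows"))
        || ((freeKb * 100.0) / totalKb) >= minPct;
  }

  public static void main(String[] args) {
    String[] linux = {"rhel9"}; // illustrative OS tag
    // 20% threshold (illustrative): 50 GB free of 200 GB = 25% -> enough
    System.out.println(isTempDirStorageEnough(200_000_000L, 50_000_000L, linux, 20)); // true
    // 20 GB free of 200 GB = 10% -> not enough
    System.out.println(isTempDirStorageEnough(200_000_000L, 20_000_000L, linux, 20)); // false
  }
}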
- Set the host state to UP, when the amount of + * free space in the temporary directory is greater or equal to the minimum required and the host + * has a comment with subject: SUBJECT_COMMENT_FULL_TEMP_DIR + * + * @param host + * @param reportHost + * @return + */ + private boolean changeStateForTempDirStorage(DispatchHost host, RenderHost reportHost) { + // The minimum amount of free space in the temporary directory to book a host + int minAvailableTempPercentage = + env.getRequiredProperty("dispatcher.min_available_temp_storage_percentage", Integer.class); + + // Prevent cue frames from booking on hosts with full temporary directories + boolean hasEnoughTempStorage = + isTempDirStorageEnough(reportHost.getTotalMcp(), reportHost.getFreeMcp(), host.getOs()); + if (!hasEnoughTempStorage && host.hardwareState == HardwareState.UP) { + // Insert a comment indicating that the Host status = Repair with reason = Full + // temporary directory + CommentDetail c = new CommentDetail(); + c.subject = SUBJECT_COMMENT_FULL_TEMP_DIR; + c.user = CUEBOT_COMMENT_USER; + c.timestamp = null; + long requiredTempMb = + (long) (((minAvailableTempPercentage / 100.0) * reportHost.getTotalMcp()) / 1024); + c.message = + "Host " + host.getName() + " marked as REPAIR. The current amount of free space in the " + + "temporary directory (mcp) is " + (reportHost.getFreeMcp() / 1024) + + "MB. It must have at least " + ((requiredTempMb)) + + "MB of free space in temporary directory"; + commentManager.addComment(host, c); + + // Set the host state to REPAIR + hostManager.setHostState(host, HardwareState.REPAIR); + host.hardwareState = HardwareState.REPAIR; + + return true; + } else if (hasEnoughTempStorage && host.hardwareState == HardwareState.REPAIR) { + // Check if the host with REPAIR status has comments with + // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and + // user=CUEBOT_COMMENT_USER and delete the comments, if they exist + boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + + if (commentsDeleted) { + // Set the host state to UP + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + return true; + } + } + return false; + } + + /** + * Changes the NIMBY lock state. If the DB indicates a NIMBY lock but RQD does not, then the host + * is unlocked. If the DB indicates the host is not locked but RQD indicates it is, the host is + * locked. + * + * @param host + * @param rh + */ + private void changeNimbyState(DispatchHost host, RenderHost rh) { + if (rh.getNimbyLocked()) { + if (host.lockState.equals(LockState.OPEN)) { + host.lockState = LockState.NIMBY_LOCKED; + hostManager.setHostLock(host, LockState.NIMBY_LOCKED, new Source("NIMBY")); + } + } else { + if (host.lockState.equals(LockState.NIMBY_LOCKED)) { + host.lockState = LockState.OPEN; + hostManager.setHostLock(host, LockState.OPEN, new Source("NIMBY")); + } + } + } + + /** + * Changes the Lock state of the host. Looks at the number of locked cores and sets host to locked + * if all cores are locked. 
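To clarify the arithmetic inside the REPAIR comment built above: the required amount is the threshold percentage of the reported total, and both figures are divided by 1024 for display, which implies the raw Mcp values are in kilobytes. A hedged rendering with sample numbers:

// Rendering of the arithmetic used to build the REPAIR comment above (values are samples,
// and the kB assumption follows from the /1024-to-MB conversion in the patch).
class RepairCommentMathExample {
  public static void main(String[] args) {
    int minAvailableTempPercentage = 20; // illustrative threshold
    long totalMcpKb = 104_857_600L;      // 100 GiB of temp storage reported in kB
    long freeMcpKb = 5_242_880L;         // 5 GiB free

    long requiredTempMb = (long) (((minAvailableTempPercentage / 100.0) * totalMcpKb) / 1024);
    long freeTempMb = freeMcpKb / 1024;

    // Comment would read: "free space ... is 5120MB. It must have at least 20480MB"
    System.out.println("free=" + freeTempMb + "MB required=" + requiredTempMb + "MB");
  }
}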
+ * + * @param host DispatchHost + * @param coreInfo CoreDetail + */ + private void changeLockState(DispatchHost host, CoreDetail coreInfo) { + if (host.lockState == LockState.LOCKED) { + if (coreInfo.getLockedCores() < coreInfo.getTotalCores()) { + host.lockState = LockState.OPEN; + hostManager.setHostLock(host, LockState.OPEN, new Source("cores")); + } + } else if (coreInfo.getLockedCores() >= coreInfo.getTotalCores()) { + host.lockState = LockState.LOCKED; + hostManager.setHostLock(host, LockState.LOCKED, new Source("cores")); + } + } + + /** + * Prevent host from entering an OOM state where oom-killer might start killing important OS + * processes and frames start using SWAP memory The kill logic will kick in one of the following + * conditions is met: - Host has less than oom_max_safe_used_physical_memory_threshold memory + * available and less than oom_max_safe_used_swap_memory_threshold swap available - A frame is + * taking more than OOM_FRAME_OVERBOARD_PERCENT of what it had reserved For frames that are using + * more than they had reserved but not above the threshold, negotiate expanding the reservations + * with other frames on the same host + * + * @param dispatchHost + * @param report + */ + private void handleMemoryUsage(final DispatchHost dispatchHost, RenderHost renderHost, + List runningFrames) { + // Don't keep memory balances on nimby hosts and hosts with invalid memory + // information + if (dispatchHost.isNimby || renderHost.getTotalMem() <= 0) { + return; } - /** - * Changes the Lock state of the host. Looks at the number of locked cores and sets host to - * locked if all cores are locked. - * - * @param host DispatchHost - * @param coreInfo CoreDetail - */ - private void changeLockState(DispatchHost host, CoreDetail coreInfo) { - if (host.lockState == LockState.LOCKED) { - if (coreInfo.getLockedCores() < coreInfo.getTotalCores()) { - host.lockState = LockState.OPEN; - hostManager.setHostLock( - host, - LockState.OPEN, - new Source("cores") - ); - } - } else if (coreInfo.getLockedCores() >= coreInfo.getTotalCores()) { - host.lockState = LockState.LOCKED; - hostManager.setHostLock( - host, - LockState.LOCKED, - new Source("cores") - ); - } + final double OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD = env.getRequiredProperty( + "dispatcher.oom_max_safe_used_physical_memory_threshold", Double.class); + final double OOM_MAX_SAFE_USED_SWAP_THRESHOLD = + env.getRequiredProperty("dispatcher.oom_max_safe_used_swap_memory_threshold", Double.class); + final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = + env.getRequiredProperty("dispatcher.oom_frame_overboard_allowed_threshold", Double.class); + + Double physMemoryUsageRatio = renderHost.getTotalMem() > 0 + ? 1.0 - renderHost.getFreeMem() / (double) renderHost.getTotalMem() + : 0.0; + + Double swapMemoryUsageRatio = renderHost.getTotalSwap() > 0 + ? 1.0 - renderHost.getFreeSwap() / (double) renderHost.getTotalSwap() + : 0.0; + + // If checking for the swap threshold has been disabled, only memory usage is + // taken into consideration. 
+ // If checking for memory has been disabled, checking for swap isolated is not + // safe, therefore disabled + boolean memoryWarning = false; + if (OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && OOM_MAX_SAFE_USED_SWAP_THRESHOLD > 0.0 + && !physMemoryUsageRatio.isNaN() && !swapMemoryUsageRatio.isNaN()) { + memoryWarning = physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD + && swapMemoryUsageRatio > OOM_MAX_SAFE_USED_SWAP_THRESHOLD; + } else if (OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && !physMemoryUsageRatio.isNaN()) { + memoryWarning = physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD; } - /** - * Prevent host from entering an OOM state where oom-killer might start killing - * important OS processes and frames start using SWAP memory - * The kill logic will kick in one of the following conditions is met: - * - Host has less than oom_max_safe_used_physical_memory_threshold memory - * available and less than oom_max_safe_used_swap_memory_threshold swap - * available - * - A frame is taking more than OOM_FRAME_OVERBOARD_PERCENT of what it had - * reserved - * For frames that are using more than they had reserved but not above the - * threshold, negotiate expanding the reservations with other frames on the same - * host - * - * @param dispatchHost - * @param report - */ - private void handleMemoryUsage( - final DispatchHost dispatchHost, - RenderHost renderHost, - List runningFrames - ) { - // Don't keep memory balances on nimby hosts and hosts with invalid memory - // information - if (dispatchHost.isNimby || renderHost.getTotalMem() <= 0) { - return; + if (memoryWarning) { + logger.warn("Memory warning(" + renderHost.getName() + "): physMemoryRatio: " + + physMemoryUsageRatio + ", swapRatio: " + swapMemoryUsageRatio); + // Try to kill frames using swap memory as they are probably performing poorly + long swapUsed = renderHost.getTotalSwap() - renderHost.getFreeSwap(); + long maxSwapUsageAllowed = + (long) (renderHost.getTotalSwap() * OOM_MAX_SAFE_USED_SWAP_THRESHOLD); + + // Sort runningFrames bassed on how much swap they are using + runningFrames.sort(Comparator + .comparingLong((RunningFrameInfo frame) -> frame.getUsedSwapMemory()).reversed()); + + int killAttemptsRemaining = 5; + for (RunningFrameInfo frame : runningFrames) { + // Reached the first frame on the sorted list without swap usage + if (frame.getUsedSwapMemory() <= 0) { + break; } - - final double OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD = - env.getRequiredProperty( - "dispatcher.oom_max_safe_used_physical_memory_threshold", - Double.class - ); - final double OOM_MAX_SAFE_USED_SWAP_THRESHOLD = env.getRequiredProperty( - "dispatcher.oom_max_safe_used_swap_memory_threshold", - Double.class - ); - final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = - env.getRequiredProperty( - "dispatcher.oom_frame_overboard_allowed_threshold", - Double.class - ); - - Double physMemoryUsageRatio = renderHost.getTotalMem() > 0 - ? 1.0 - renderHost.getFreeMem() / (double) renderHost.getTotalMem() - : 0.0; - - Double swapMemoryUsageRatio = renderHost.getTotalSwap() > 0 - ? 1.0 - - renderHost.getFreeSwap() / (double) renderHost.getTotalSwap() - : 0.0; - - // If checking for the swap threshold has been disabled, only memory usage is - // taken into consideration. 
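A standalone rendering of the memory-pressure test above: usage ratios are computed as 1 minus free over total, and when both checks are enabled a warning requires the physical and swap thresholds to be exceeded together. The 0.9 and 0.05 thresholds below are illustrative stand-ins for the dispatcher.oom_max_safe_used_*_memory_threshold properties, not their defaults.

// Rendering of the OOM warning condition shown above, with illustrative thresholds.
class MemoryWarningExample {
  static boolean memoryWarning(long totalMem, long freeMem, long totalSwap, long freeSwap,
      double maxPhysUsedRatio, double maxSwapUsedRatio) {
    double physUsed = totalMem > 0 ? 1.0 - freeMem / (double) totalMem : 0.0;
    double swapUsed = totalSwap > 0 ? 1.0 - freeSwap / (double) totalSwap : 0.0;
    if (maxPhysUsedRatio > 0.0 && maxSwapUsedRatio > 0.0) {
      // Both checks enabled: warn only when physical AND swap are over their thresholds.
      return physUsed > maxPhysUsedRatio && swapUsed > maxSwapUsedRatio;
    } else if (maxPhysUsedRatio > 0.0) {
      // Swap check disabled: fall back to physical memory alone.
      return physUsed > maxPhysUsedRatio;
    }
    return false; // physical check disabled: swap alone is not considered safe to act on
  }

  public static void main(String[] args) {
    // 95% of RAM and 20% of swap used, thresholds 0.9 and 0.05 (illustrative) -> warning
    System.out.println(memoryWarning(64_000L, 3_200L, 8_000L, 6_400L, 0.9, 0.05)); // true
  }
}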
- // If checking for memory has been disabled, checking for swap isolated is not - // safe, therefore disabled - boolean memoryWarning = false; - if ( - OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && - OOM_MAX_SAFE_USED_SWAP_THRESHOLD > 0.0 && - !physMemoryUsageRatio.isNaN() && - !swapMemoryUsageRatio.isNaN() - ) { - memoryWarning = - physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD && - swapMemoryUsageRatio > OOM_MAX_SAFE_USED_SWAP_THRESHOLD; - } else if ( - OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && - !physMemoryUsageRatio.isNaN() - ) { - memoryWarning = - physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD; + if (killProcForMemory(frame.getFrameId(), renderHost.getName(), KillCause.HostUnderOom)) { + swapUsed -= frame.getUsedSwapMemory(); + logger.info("Memory warning(" + renderHost.getName() + "): " + "Killing frame on " + + frame.getJobName() + "." + frame.getFrameName() + ", using too much swap."); } - if (memoryWarning) { - logger.warn( - "Memory warning(" + - renderHost.getName() + - "): physMemoryRatio: " + - physMemoryUsageRatio + - ", swapRatio: " + - swapMemoryUsageRatio - ); - // Try to kill frames using swap memory as they are probably performing poorly - long swapUsed = - renderHost.getTotalSwap() - renderHost.getFreeSwap(); - long maxSwapUsageAllowed = (long) (renderHost.getTotalSwap() * - OOM_MAX_SAFE_USED_SWAP_THRESHOLD); - - // Sort runningFrames bassed on how much swap they are using - runningFrames.sort( - Comparator.comparingLong((RunningFrameInfo frame) -> - frame.getUsedSwapMemory() - ).reversed() - ); - - int killAttemptsRemaining = 5; - for (RunningFrameInfo frame : runningFrames) { - // Reached the first frame on the sorted list without swap usage - if (frame.getUsedSwapMemory() <= 0) { - break; - } - if ( - killProcForMemory( - frame.getFrameId(), - renderHost.getName(), - KillCause.HostUnderOom - ) - ) { - swapUsed -= frame.getUsedSwapMemory(); - logger.info( - "Memory warning(" + - renderHost.getName() + - "): " + - "Killing frame on " + - frame.getJobName() + - "." + - frame.getFrameName() + - ", using too much swap." - ); - } - - killAttemptsRemaining -= 1; - if ( - killAttemptsRemaining <= 0 || - swapUsed <= maxSwapUsageAllowed - ) { - break; - } - } + killAttemptsRemaining -= 1; + if (killAttemptsRemaining <= 0 || swapUsed <= maxSwapUsageAllowed) { + break; + } + } + } else { + // When no mass cleaning was required, check for frames going overboard + // if frames didn't go overboard, manage its reservations trying to increase + // them accordingly + for (final RunningFrameInfo frame : runningFrames) { + if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD > 0 && isFrameOverboard(frame)) { + if (!killFrameOverusingMemory(frame, dispatchHost.getName())) { + logger.warn("Frame " + frame.getJobName() + "." + frame.getFrameName() + + " is overboard but could not be killed"); + } } else { - // When no mass cleaning was required, check for frames going overboard - // if frames didn't go overboard, manage its reservations trying to increase - // them accordingly - for (final RunningFrameInfo frame : runningFrames) { - if ( - OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD > 0 && - isFrameOverboard(frame) - ) { - if ( - !killFrameOverusingMemory(frame, dispatchHost.getName()) - ) { - logger.warn( - "Frame " + - frame.getJobName() + - "." 
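The swap-relief loop above reads as: sort running frames by swap usage, largest first, then kill until the host drops back under the allowed swap, a frame with no swap usage is reached, or a five-attempt budget runs out. A self-contained sketch with plain longs standing in for RunningFrameInfo.getUsedSwapMemory(); the 0.2 threshold and kB figures are illustrative.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Sketch of the bounded, swap-sorted kill loop shown above; "killing" is just a log line here.
class SwapReliefExample {
  public static void main(String[] args) {
    long totalSwap = 8_000L, freeSwap = 1_000L;          // illustrative kB values
    long swapUsed = totalSwap - freeSwap;                // 7000
    long maxSwapUsageAllowed = (long) (totalSwap * 0.2); // 1600 with a 0.2 threshold

    List<Long> frameSwapUsage = new ArrayList<>(List.of(0L, 3_000L, 500L, 2_500L));
    frameSwapUsage.sort(Comparator.<Long>naturalOrder().reversed()); // biggest offenders first

    int killAttemptsRemaining = 5;
    for (long usedSwap : frameSwapUsage) {
      if (usedSwap <= 0) {
        break; // reached the frames that are not using swap at all
      }
      swapUsed -= usedSwap; // pretend the kill succeeded and the swap was released
      System.out.println("killed a frame using " + usedSwap + "kB swap, remaining=" + swapUsed);
      killAttemptsRemaining -= 1;
      if (killAttemptsRemaining <= 0 || swapUsed <= maxSwapUsageAllowed) {
        break;
      }
    }
  }
}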
+ - frame.getFrameName() + - " is overboard but could not be killed" - ); - } - } else { - handleMemoryReservations(frame); - } - } + handleMemoryReservations(frame); } + } } + } - public enum KillCause { - FrameOverboard("This frame is using more memory than it had reserved."), - HostUnderOom("Frame killed by host under OOM pressure"), - FrameTimedOut("Frame timed out"), - FrameLluTimedOut("Frame LLU timed out"), - FrameVerificationFailure("Frame failed to be verified on the database"); + public enum KillCause { + FrameOverboard("This frame is using more memory than it had reserved."), HostUnderOom( + "Frame killed by host under OOM pressure"), FrameTimedOut( + "Frame timed out"), FrameLluTimedOut("Frame LLU timed out"), FrameVerificationFailure( + "Frame failed to be verified on the database"); - private final String message; + private final String message; - private KillCause(String message) { - this.message = message; - } - - @Override - public String toString() { - return message; - } + private KillCause(String message) { + this.message = message; } - private boolean killFrameOverusingMemory( - RunningFrameInfo frame, - String hostname - ) { - try { - VirtualProc proc = hostManager.getVirtualProc( - frame.getResourceId() - ); - - // Don't mess with localDispatch procs - if (proc.isLocalDispatch) { - return false; - } - boolean killed = killProcForMemory( - proc.frameId, - hostname, - KillCause.FrameOverboard - ); - if (killed) { - logger.info( - "Killing frame on " + - frame.getJobName() + - "." + - frame.getFrameName() + - ", using too much memory." - ); - } - return killed; - } catch (EmptyResultDataAccessException e) { - return false; - } + @Override + public String toString() { + return message; } + } - private boolean getKillClearance(String hostname, String frameId) { - String cacheKey = hostname + "-" + frameId; - final int FRAME_KILL_RETRY_LIMIT = env.getRequiredProperty( - "dispatcher.frame_kill_retry_limit", - Integer.class - ); + private boolean killFrameOverusingMemory(RunningFrameInfo frame, String hostname) { + try { + VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); - // Cache frame+host receiving a killRequest and count how many times the request is being retried - // meaning rqd is probably failing at attempting to kill the related proc - long cachedCount; + // Don't mess with localDispatch procs + if (proc.isLocalDispatch) { + return false; + } + boolean killed = killProcForMemory(proc.frameId, hostname, KillCause.FrameOverboard); + if (killed) { + logger.info("Killing frame on " + frame.getJobName() + "." 
+ frame.getFrameName() + + ", using too much memory."); + } + return killed; + } catch (EmptyResultDataAccessException e) { + return false; + } + } + + private boolean getKillClearance(String hostname, String frameId) { + String cacheKey = hostname + "-" + frameId; + final int FRAME_KILL_RETRY_LIMIT = + env.getRequiredProperty("dispatcher.frame_kill_retry_limit", Integer.class); + + // Cache frame+host receiving a killRequest and count how many times the request + // is being retried + // meaning rqd is probably failing at attempting to kill the related proc + long cachedCount; + try { + cachedCount = 1 + killRequestCounterCache.get(cacheKey, () -> 0L); + } catch (ExecutionException e) { + return false; + } + killRequestCounterCache.put(cacheKey, cachedCount); + if (cachedCount > FRAME_KILL_RETRY_LIMIT) { + // If the kill retry limit has been reached, notify prometheus of the issue and + // give up + if (!dispatcher.isTestMode()) { try { - cachedCount = 1 + killRequestCounterCache.get(cacheKey, () -> 0L); - } catch (ExecutionException e) { - return false; - } - killRequestCounterCache.put(cacheKey, cachedCount); - if (cachedCount > FRAME_KILL_RETRY_LIMIT) { - // If the kill retry limit has been reached, notify prometheus of the issue and - // give up - if (!dispatcher.isTestMode()) { - try { - FrameInterface frame = jobManager.getFrame(frameId); - JobInterface job = jobManager.getJob(frame.getJobId()); - prometheusMetrics.incrementFrameKillFailureCounter( - hostname, - job.getName(), - frame.getName(), - frameId - ); - } catch (EmptyResultDataAccessException e) { - logger.info( - "Trying to kill a frame that no longer exists: host=" + - hostname + - " frameId=" + - frameId - ); - } - } - return false; + FrameInterface frame = jobManager.getFrame(frameId); + JobInterface job = jobManager.getJob(frame.getJobId()); + prometheusMetrics.incrementFrameKillFailureCounter(hostname, job.getName(), + frame.getName(), frameId); + } catch (EmptyResultDataAccessException e) { + logger.info("Trying to kill a frame that no longer exists: host=" + hostname + " frameId=" + + frameId); } - return true; + } + return false; } + return true; + } - private boolean killProcForMemory( - String frameId, - String hostname, - KillCause killCause - ) { - if (!getKillClearance(hostname, frameId)) { - return false; - } - - FrameInterface frame = jobManager.getFrame(frameId); - if (dispatcher.isTestMode()) { - // Different threads don't share the same database state on the test environment - (new DispatchRqdKillFrameMemory( - hostname, - frame, - killCause.toString(), - rqdClient, - dispatchSupport, - dispatcher.isTestMode() - )).run(); - } else { - try { - killQueue.execute( - new DispatchRqdKillFrameMemory( - hostname, - frame, - killCause.toString(), - rqdClient, - dispatchSupport, - dispatcher.isTestMode() - ) - ); - prometheusMetrics.incrementFrameKilledCounter( - hostname, - killCause - ); - } catch (TaskRejectedException e) { - logger.warn( - "Unable to add a DispatchRqdKillFrame request, task rejected, " + - e - ); - return false; - } - } - DispatchSupport.killedOffenderProcs.incrementAndGet(); - return true; + private boolean killProcForMemory(String frameId, String hostname, KillCause killCause) { + if (!getKillClearance(hostname, frameId)) { + return false; } - private boolean killFrame( - String frameId, - String hostname, - KillCause killCause - ) { - if (!getKillClearance(hostname, frameId)) { - return false; - } + FrameInterface frame = jobManager.getFrame(frameId); + if (dispatcher.isTestMode()) { + // 
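getKillClearance() above rate-limits kill requests by counting attempts per host+frame pair in an expiring Guava cache. A stripped-down sketch of the same pattern follows; the cache expiry, the retry limit, and the omission of the Prometheus failure counter are simplifications for illustration, not the Cuebot values.

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;

    public class KillClearanceSketch {
        private final Cache<String, Long> killRequestCounter =
                CacheBuilder.newBuilder().expireAfterWrite(3, TimeUnit.HOURS).build();
        private final int retryLimit = 3;  // illustrative limit

        boolean getKillClearance(String hostname, String frameId) {
            String cacheKey = hostname + "-" + frameId;
            long count;
            try {
                // get() loads 0 on a cache miss, so the first request sees count == 1.
                count = 1 + killRequestCounter.get(cacheKey, () -> 0L);
            } catch (ExecutionException e) {
                return false;
            }
            killRequestCounter.put(cacheKey, count);
            // Past the limit the host is presumably failing to kill the proc; give up.
            return count <= retryLimit;
        }
    }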
Different threads don't share the same database state on the test environment + (new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), rqdClient, + dispatchSupport, dispatcher.isTestMode())).run(); + } else { + try { + killQueue.execute(new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), + rqdClient, dispatchSupport, dispatcher.isTestMode())); + prometheusMetrics.incrementFrameKilledCounter(hostname, killCause); + } catch (TaskRejectedException e) { + logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); + return false; + } + } + DispatchSupport.killedOffenderProcs.incrementAndGet(); + return true; + } - if (dispatcher.isTestMode()) { - // Different threads don't share the same database state on the test environment - (new DispatchRqdKillFrame( - hostname, - frameId, - killCause.toString(), - rqdClient - )).run(); - } else { - try { - killQueue.execute( - new DispatchRqdKillFrame( - hostname, - frameId, - killCause.toString(), - rqdClient - ) - ); - prometheusMetrics.incrementFrameKilledCounter( - hostname, - killCause - ); - } catch (TaskRejectedException e) { - logger.warn( - "Unable to add a DispatchRqdKillFrame request, task rejected, " + - e - ); - } - } - DispatchSupport.killedOffenderProcs.incrementAndGet(); - return true; + private boolean killFrame(String frameId, String hostname, KillCause killCause) { + if (!getKillClearance(hostname, frameId)) { + return false; } - /** - * Check frame memory usage comparing the amount used with the amount it had reserved - * @param frame - * @return - */ - private boolean isFrameOverboard(final RunningFrameInfo frame) { - final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = - env.getRequiredProperty( - "dispatcher.oom_frame_overboard_allowed_threshold", - Double.class - ); - - if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD < 0) { - return false; - } + if (dispatcher.isTestMode()) { + // Different threads don't share the same database state on the test environment + (new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), rqdClient)).run(); + } else { + try { + killQueue + .execute(new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), rqdClient)); + prometheusMetrics.incrementFrameKilledCounter(hostname, killCause); + } catch (TaskRejectedException e) { + logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); + } + } + DispatchSupport.killedOffenderProcs.incrementAndGet(); + return true; + } + + /** + * Check frame memory usage comparing the amount used with the amount it had reserved + * + * @param frame + * @return + */ + private boolean isFrameOverboard(final RunningFrameInfo frame) { + final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = + env.getRequiredProperty("dispatcher.oom_frame_overboard_allowed_threshold", Double.class); + + if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD < 0) { + return false; + } - double rss = (double) frame.getRss(); - double maxRss = (double) frame.getMaxRss(); - final double MAX_RSS_OVERBOARD_THRESHOLD = - OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD * 2; - final double RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER = 0.1; + double rss = (double) frame.getRss(); + double maxRss = (double) frame.getMaxRss(); + final double MAX_RSS_OVERBOARD_THRESHOLD = OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD * 2; + final double RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER = 0.1; - try { - VirtualProc proc = hostManager.getVirtualProc( - frame.getResourceId() - ); - double reserved = (double) proc.memoryReserved; - - // Last memory report is higher than the 
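Both killProcForMemory() and killFrame() above hand the actual kill off to a bounded executor and treat a rejected submission as "not killed" rather than an error. A sketch of that submission pattern, with plain java.util.concurrent types standing in for the Spring-managed killQueue, the DispatchRqdKillFrame task, and TaskRejectedException:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class KillQueueSketch {
        // Illustrative pool sizing; the real killQueue is configured elsewhere.
        private final ThreadPoolExecutor killQueue = new ThreadPoolExecutor(
                2, 4, 10, TimeUnit.SECONDS, new LinkedBlockingQueue<>(100));

        boolean submitKill(String hostname, String frameId) {
            try {
                killQueue.execute(() ->
                        System.out.println("kill " + frameId + " on " + hostname));
                return true;
            } catch (RejectedExecutionException e) {
                // Queue full or executor shut down: report failure and move on.
                return false;
            }
        }
    }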
threshold - if ( - isOverboard( - rss, - reserved, - OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD - ) - ) { - return true; - } - // If rss is not overboard, handle the situation where the frame might be going overboard from - // time to time but the last report wasn't during a spike. For this case, consider a combination - // of rss and maxRss. maxRss > 2 * threshold and rss > 0.9 - else { - return ( - isOverboard( - maxRss, - reserved, - MAX_RSS_OVERBOARD_THRESHOLD - ) && - isOverboard( - rss, - reserved, - -RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER - ) - ); - } - } catch (EmptyResultDataAccessException e) { - logger.info( - "HostReportHandler(isFrameOverboard): Virtual proc for frame " + - frame.getFrameName() + - " on job " + - frame.getJobName() + - " doesn't exist on the database" - ); - // Not able to mark the frame overboard is it couldn't be found on the db. - // Proc accounting (verifyRunningProc) should take care of it - return false; - } - } + try { + VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); + double reserved = (double) proc.memoryReserved; - private boolean isOverboard(double value, double total, double threshold) { - return value / total >= (1 + threshold); + // Last memory report is higher than the threshold + if (isOverboard(rss, reserved, OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD)) { + return true; + } + // If rss is not overboard, handle the situation where the frame might be going + // overboard from + // time to time but the last report wasn't during a spike. For this case, + // consider a combination + // of rss and maxRss. maxRss > 2 * threshold and rss > 0.9 + else { + return (isOverboard(maxRss, reserved, MAX_RSS_OVERBOARD_THRESHOLD) + && isOverboard(rss, reserved, -RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER)); + } + } catch (EmptyResultDataAccessException e) { + logger.info( + "HostReportHandler(isFrameOverboard): Virtual proc for frame " + frame.getFrameName() + + " on job " + frame.getJobName() + " doesn't exist on the database"); + // Not able to mark the frame overboard is it couldn't be found on the db. 
+ // Proc accounting (verifyRunningProc) should take care of it + return false; } - - /** - * Handle memory reservations for the given frame - * - * @param frame - */ - private void handleMemoryReservations(final RunningFrameInfo frame) { - VirtualProc proc = null; + } + + private boolean isOverboard(double value, double total, double threshold) { + return value / total >= (1 + threshold); + } + + /** + * Handle memory reservations for the given frame + * + * @param frame + */ + private void handleMemoryReservations(final RunningFrameInfo frame) { + VirtualProc proc = null; + try { + proc = hostManager.getVirtualProc(frame.getResourceId()); + + if (proc.isLocalDispatch) { + return; + } + + if (dispatchSupport.increaseReservedMemory(proc, frame.getRss())) { + proc.memoryReserved = frame.getRss(); + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + " increased its reserved memory to " + CueUtil.KbToMb(frame.getRss())); + } + } catch (ResourceReservationFailureException e) { + if (proc != null) { + long memNeeded = frame.getRss() - proc.memoryReserved; + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + "was unable to reserve an additional " + CueUtil.KbToMb(memNeeded) + "on proc " + + proc.getName() + ", " + e); try { - proc = hostManager.getVirtualProc(frame.getResourceId()); - - if (proc.isLocalDispatch) { - return; - } - - if (dispatchSupport.increaseReservedMemory(proc, frame.getRss())) { - proc.memoryReserved = frame.getRss(); - logger.info( - "frame " + - frame.getFrameName() + - " on job " + - frame.getJobName() + - " increased its reserved memory to " + - CueUtil.KbToMb(frame.getRss()) - ); - } - } catch (ResourceReservationFailureException e) { - if (proc != null) { - long memNeeded = frame.getRss() - proc.memoryReserved; - logger.info( - "frame " + - frame.getFrameName() + - " on job " + - frame.getJobName() + - "was unable to reserve an additional " + - CueUtil.KbToMb(memNeeded) + - "on proc " + - proc.getName() + - ", " + - e - ); - try { - if ( - dispatchSupport.balanceReservedMemory(proc, memNeeded) - ) { - proc.memoryReserved = frame.getRss(); - logger.info( - "was able to balance host: " + proc.getName() - ); - } else { - logger.info( - "failed to balance host: " + proc.getName() - ); - } - } catch (Exception ex) { - logger.warn( - "failed to balance host: " + proc.getName() + ", " + e - ); - } - } else { - logger.info( - "frame " + - frame.getFrameName() + - " on job " + - frame.getJobName() + - "was unable to reserve an additional memory. Proc could not be found" - ); - } - } catch (EmptyResultDataAccessException e) { - logger.info( - "HostReportHandler: Memory reservations for frame " + - frame.getFrameName() + - " on job " + - frame.getJobName() + - " proc could not be found" - ); + if (dispatchSupport.balanceReservedMemory(proc, memNeeded)) { + proc.memoryReserved = frame.getRss(); + logger.info("was able to balance host: " + proc.getName()); + } else { + logger.info("failed to balance host: " + proc.getName()); + } + } catch (Exception ex) { + logger.warn("failed to balance host: " + proc.getName() + ", " + e); } + } else { + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + "was unable to reserve an additional memory. 
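Worked numbers for the isFrameOverboard()/isOverboard() checks above, assuming an illustrative oom_frame_overboard_allowed_threshold of 0.6 and a 4 GB reservation; the threshold and sizes are examples, not project defaults.

    public class OverboardExample {
        static boolean isOverboard(double value, double total, double threshold) {
            return value / total >= (1 + threshold);
        }

        public static void main(String[] args) {
            double threshold = 0.6;
            double reservedKb = 4_194_304;  // 4 GB reserved on the proc

            // Primary trigger: last RSS report over reserved * (1 + 0.6) = 6.4 GB.
            System.out.println(isOverboard(6_920_000, reservedKb, threshold));  // true

            // Secondary trigger for spiky frames: maxRss over reserved * (1 + 2*0.6)
            // = 8.8 GB while the current rss is still at least 90% of the reservation.
            double maxRssThreshold = threshold * 2;
            double rssFloorTrigger = 0.1;
            System.out.println(isOverboard(9_500_000, reservedKb, maxRssThreshold)
                    && isOverboard(3_900_000, reservedKb, -rssFloorTrigger));    // true
        }
    }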
Proc could not be found"); + } + } catch (EmptyResultDataAccessException e) { + logger.info("HostReportHandler: Memory reservations for frame " + frame.getFrameName() + + " on job " + frame.getJobName() + " proc could not be found"); } - - /** - * Kill frames that over run. - * - * @param rFrames - */ - private void killTimedOutFrames( - List runningFrames, - String hostname - ) { - for (RunningFrameInfo frame : runningFrames) { - String layerId = frame.getLayerId(); - - try { - LayerDetail layer = layerDao.getLayerDetail(layerId); - long runtimeMinutes = - ((System.currentTimeMillis() - frame.getStartTime()) / - 1000l) / - 60; - - if (layer.timeout != 0 && runtimeMinutes > layer.timeout) { - killFrame( - frame.getFrameId(), - hostname, - KillCause.FrameTimedOut - ); - } else if (layer.timeout_llu != 0 && frame.getLluTime() != 0) { - long r = System.currentTimeMillis() / 1000; - long lastUpdate = (r - frame.getLluTime()) / 60; - - if ( - layer.timeout_llu != 0 && - lastUpdate > (layer.timeout_llu - 1) - ) { - killFrame( - frame.getFrameId(), - hostname, - KillCause.FrameLluTimedOut - ); - } - } - } catch (EmptyResultDataAccessException e) { - logger.info("Unable to get layer with id=" + layerId); - } + } + + /** + * Kill frames that over run. + * + * @param rFrames + */ + private void killTimedOutFrames(List runningFrames, String hostname) { + for (RunningFrameInfo frame : runningFrames) { + String layerId = frame.getLayerId(); + + try { + LayerDetail layer = layerDao.getLayerDetail(layerId); + long runtimeMinutes = ((System.currentTimeMillis() - frame.getStartTime()) / 1000l) / 60; + + if (layer.timeout != 0 && runtimeMinutes > layer.timeout) { + killFrame(frame.getFrameId(), hostname, KillCause.FrameTimedOut); + } else if (layer.timeout_llu != 0 && frame.getLluTime() != 0) { + long r = System.currentTimeMillis() / 1000; + long lastUpdate = (r - frame.getLluTime()) / 60; + + if (layer.timeout_llu != 0 && lastUpdate > (layer.timeout_llu - 1)) { + killFrame(frame.getFrameId(), hostname, KillCause.FrameLluTimedOut); + } } + } catch (EmptyResultDataAccessException e) { + logger.info("Unable to get layer with id=" + layerId); + } } - - /** - * Update memory usage and LLU time for the given list of frames. - * - * @param rFrames - */ - private void updateMemoryUsageAndLluTime(List rFrames) { - for (RunningFrameInfo rf : rFrames) { - FrameInterface frame = jobManager.getFrame(rf.getFrameId()); - - dispatchSupport.updateFrameMemoryUsageAndLluTime( - frame, - rf.getRss(), - rf.getMaxRss(), - rf.getLluTime() - ); - - dispatchSupport.updateProcMemoryUsage( - frame, - rf.getRss(), - rf.getMaxRss(), - rf.getVsize(), - rf.getMaxVsize(), - rf.getUsedGpuMemory(), - rf.getMaxUsedGpuMemory(), - rf.getUsedSwapMemory(), - rf.getChildren().toByteArray() - ); - } - - updateJobMemoryUsage(rFrames); - updateLayerMemoryUsage(rFrames); + } + + /** + * Update memory usage and LLU time for the given list of frames. + * + * @param rFrames + */ + private void updateMemoryUsageAndLluTime(List rFrames) { + for (RunningFrameInfo rf : rFrames) { + FrameInterface frame = jobManager.getFrame(rf.getFrameId()); + + dispatchSupport.updateFrameMemoryUsageAndLluTime(frame, rf.getRss(), rf.getMaxRss(), + rf.getLluTime()); + + dispatchSupport.updateProcMemoryUsage(frame, rf.getRss(), rf.getMaxRss(), rf.getVsize(), + rf.getMaxVsize(), rf.getUsedGpuMemory(), rf.getMaxUsedGpuMemory(), rf.getUsedSwapMemory(), + rf.getChildren().toByteArray()); } - /** - * Update job memory using for the given list of frames. 
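killTimedOutFrames() above applies two independent cutoffs, a total-runtime timeout and a last-log-update (LLU) timeout, both expressed in minutes with 0 meaning disabled. A small sketch of the two checks, with a plain record standing in for LayerDetail:

    public class FrameTimeoutSketch {
        record LayerTimeouts(int timeoutMinutes, int timeoutLluMinutes) {}

        static boolean isTimedOut(LayerTimeouts layer, long frameStartTimeMs, long nowMs) {
            long runtimeMinutes = ((nowMs - frameStartTimeMs) / 1000L) / 60;
            return layer.timeoutMinutes() != 0 && runtimeMinutes > layer.timeoutMinutes();
        }

        static boolean isLluTimedOut(LayerTimeouts layer, long lluTimeSec, long nowMs) {
            if (layer.timeoutLluMinutes() == 0 || lluTimeSec == 0) {
                return false;
            }
            long minutesSinceLastUpdate = (nowMs / 1000 - lluTimeSec) / 60;
            // Same cutoff style as above: trip once the last log update is older
            // than (timeout_llu - 1) minutes.
            return minutesSinceLastUpdate > (layer.timeoutLluMinutes() - 1);
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis();
            LayerTimeouts layer = new LayerTimeouts(90, 30);
            // Frame started 2 hours ago -> over the 90 minute runtime timeout.
            System.out.println(isTimedOut(layer, now - 2 * 60 * 60 * 1000L, now)); // true
            // Last log update 10 minutes ago -> inside the 30 minute LLU timeout.
            System.out.println(isLluTimedOut(layer, now / 1000 - 10 * 60, now));   // false
        }
    }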
- * - * @param frames - */ - private void updateJobMemoryUsage(List frames) { - final Map jobs = new HashMap( - frames.size() - ); - - for (RunningFrameInfo frame : frames) { - JobEntity job = new JobEntity(frame.getJobId()); - if (jobs.containsKey(job)) { - if (jobs.get(job) < frame.getMaxRss()) { - jobs.put(job, frame.getMaxRss()); - } - } else { - jobs.put(job, frame.getMaxRss()); - } - } - - for (Map.Entry set : jobs.entrySet()) { - jobDao.updateMaxRSS(set.getKey(), set.getValue()); + updateJobMemoryUsage(rFrames); + updateLayerMemoryUsage(rFrames); + } + + /** + * Update job memory using for the given list of frames. + * + * @param frames + */ + private void updateJobMemoryUsage(List frames) { + final Map jobs = new HashMap(frames.size()); + + for (RunningFrameInfo frame : frames) { + JobEntity job = new JobEntity(frame.getJobId()); + if (jobs.containsKey(job)) { + if (jobs.get(job) < frame.getMaxRss()) { + jobs.put(job, frame.getMaxRss()); } + } else { + jobs.put(job, frame.getMaxRss()); + } } - /** - * Update layer memory usage for the given list of frames. - * - * @param frames - */ - private void updateLayerMemoryUsage(List frames) { - final Map layers = new HashMap( - frames.size() - ); - - for (RunningFrameInfo frame : frames) { - LayerEntity layer = new LayerEntity(frame.getLayerId()); - if (layers.containsKey(layer)) { - if (layers.get(layer) < frame.getMaxRss()) { - layers.put(layer, frame.getMaxRss()); - } - } else { - layers.put(layer, frame.getMaxRss()); - } - } - - /* Attempt to update the max RSS value for the job **/ - for (Map.Entry set : layers.entrySet()) { - layerDao.increaseLayerMinMemory(set.getKey(), set.getValue()); - layerDao.updateLayerMaxRSS(set.getKey(), set.getValue(), false); + for (Map.Entry set : jobs.entrySet()) { + jobDao.updateMaxRSS(set.getKey(), set.getValue()); + } + } + + /** + * Update layer memory usage for the given list of frames. + * + * @param frames + */ + private void updateLayerMemoryUsage(List frames) { + final Map layers = new HashMap(frames.size()); + + for (RunningFrameInfo frame : frames) { + LayerEntity layer = new LayerEntity(frame.getLayerId()); + if (layers.containsKey(layer)) { + if (layers.get(layer) < frame.getMaxRss()) { + layers.put(layer, frame.getMaxRss()); } + } else { + layers.put(layer, frame.getMaxRss()); + } } - /** - * Number of seconds before running frames have to exist before being - * verified against the DB. - */ - private static final long FRAME_VERIFICATION_GRACE_PERIOD_SECONDS = 120; - - /** - * Verify all running frames in the given report against - * the DB. Frames that have not been running for at least - * FRAME_VERIFICATION_GRACE_PERIOD_SECONDS are skipped. - * - * If a frame->proc mapping is not verified then the record - * for the proc is pulled from the DB. If the proc doesn't - * exist at all, then the frame is killed with the message: - * "but the DB did not reflect this" - * - * The main reason why a proc no longer exists is that the cue - * though the host went down and cleared out all running frames. - * - * @param report - */ - public List verifyRunningFrameInfo(HostReport report) { - List runningFrames = new ArrayList( - report.getFramesCount() - ); - - for (RunningFrameInfo runningFrame : report.getFramesList()) { - long runtimeSeconds = - (System.currentTimeMillis() - runningFrame.getStartTime()) / - 1000l; - - // Don't test frames that haven't been running long enough. 
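updateJobMemoryUsage() and updateLayerMemoryUsage() above both reduce the report to the maximum maxRss seen per job or layer before writing it back through the DAOs. The same aggregation written with Map.merge, using plain strings as stand-ins for the entity keys; this is an alternative idiom for illustration, not what the patch itself uses.

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    public class MaxRssAggregation {
        record Frame(String jobId, long maxRssKb) {}

        public static void main(String[] args) {
            List<Frame> frames = List.of(
                    new Frame("job-a", 2_000_000),
                    new Frame("job-a", 3_500_000),
                    new Frame("job-b", 1_200_000));

            Map<String, Long> maxRssByJob = new HashMap<>();
            for (Frame frame : frames) {
                // Keep the larger of the stored value and this frame's maxRss.
                maxRssByJob.merge(frame.jobId(), frame.maxRssKb(), Math::max);
            }
            System.out.println(maxRssByJob); // e.g. {job-a=3500000, job-b=1200000}
        }
    }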
- if (runtimeSeconds < FRAME_VERIFICATION_GRACE_PERIOD_SECONDS) { - logger.info( - "verified " + - runningFrame.getJobName() + - "/" + - runningFrame.getFrameName() + - " on " + - report.getHost().getName() + - " by grace period " + - runtimeSeconds + - " seconds." - ); - runningFrames.add(runningFrame); - continue; - } - - if ( - hostManager.verifyRunningProc( - runningFrame.getResourceId(), - runningFrame.getFrameId() - ) - ) { - runningFrames.add(runningFrame); - continue; - } - - /* - * The frame this proc is running is no longer - * assigned to this proc. Don't ever touch - * the frame record. If we make it here that means - * the proc has been running for over 2 min. - */ - String msg; - VirtualProc proc = null; - - try { - proc = hostManager.getVirtualProc(runningFrame.getResourceId()); - msg = - "Virtual proc " + - proc.getProcId() + - "is assigned to " + - proc.getFrameId() + - " not " + - runningFrame.getFrameId(); - } catch (Exception e) { - /* - * This will happen if the host goes offline and then - * comes back. In this case, we don't touch the frame - * since it might already be running somewhere else. We - * do however kill the proc. - */ - msg = "Virtual proc did not exist."; - } - - DispatchSupport.accountingErrors.incrementAndGet(); - if (proc != null && hostManager.isOprhan(proc)) { - dispatchSupport.clearVirtualProcAssignement(proc); - dispatchSupport.unbookProc(proc); - proc = null; - } - if (proc == null) { - // A frameCompleteReport might have been delivered before this report was - // processed - FrameDetail frameLatestVersion = jobManager.getFrameDetail( - runningFrame.getFrameId() - ); - if (frameLatestVersion.state != FrameState.RUNNING) { - logger.info( - "DelayedVerification, the proc " + - runningFrame.getResourceId() + - " on host " + - report.getHost().getName() + - " has already Completed " + - runningFrame.getJobName() + - "/" + - runningFrame.getFrameName() - ); - } else if ( - killFrame( - runningFrame.getFrameId(), - report.getHost().getName(), - KillCause.FrameVerificationFailure - ) - ) { - logger.info( - "FrameVerificationError, the proc " + - runningFrame.getResourceId() + - " on host " + - report.getHost().getName() + - " was running for " + - (runtimeSeconds / 60.0f) + - " minutes " + - runningFrame.getJobName() + - "/" + - runningFrame.getFrameName() + - " but the DB did not " + - "reflect this. " + - msg - ); - } else { - logger.warn( - "FrameStuckWarning: frameId=" + - runningFrame.getFrameId() + - " render_node=" + - report.getHost().getName() + - " - " + - runningFrame.getJobName() + - "/" + - runningFrame.getFrameName() - ); - } - } + /* Attempt to update the max RSS value for the job **/ + for (Map.Entry set : layers.entrySet()) { + layerDao.increaseLayerMinMemory(set.getKey(), set.getValue()); + layerDao.updateLayerMaxRSS(set.getKey(), set.getValue(), false); + } + } + + /** + * Number of seconds before running frames have to exist before being verified against the DB. + */ + private static final long FRAME_VERIFICATION_GRACE_PERIOD_SECONDS = 120; + + /** + * Verify all running frames in the given report against the DB. Frames that have not been running + * for at least FRAME_VERIFICATION_GRACE_PERIOD_SECONDS are skipped. + * + * If a frame->proc mapping is not verified then the record for the proc is pulled from the DB. 
If + * the proc doesn't exist at all, then the frame is killed with the message: "but the DB did not + * reflect this" + * + * The main reason why a proc no longer exists is that the cue though the host went down and + * cleared out all running frames. + * + * @param report + */ + public List verifyRunningFrameInfo(HostReport report) { + List runningFrames = new ArrayList(report.getFramesCount()); + + for (RunningFrameInfo runningFrame : report.getFramesList()) { + long runtimeSeconds = (System.currentTimeMillis() - runningFrame.getStartTime()) / 1000l; + + // Don't test frames that haven't been running long enough. + if (runtimeSeconds < FRAME_VERIFICATION_GRACE_PERIOD_SECONDS) { + logger.info( + "verified " + runningFrame.getJobName() + "/" + runningFrame.getFrameName() + " on " + + report.getHost().getName() + " by grace period " + runtimeSeconds + " seconds."); + runningFrames.add(runningFrame); + continue; + } + + if (hostManager.verifyRunningProc(runningFrame.getResourceId(), runningFrame.getFrameId())) { + runningFrames.add(runningFrame); + continue; + } + + /* + * The frame this proc is running is no longer assigned to this proc. Don't ever touch the + * frame record. If we make it here that means the proc has been running for over 2 min. + */ + String msg; + VirtualProc proc = null; + + try { + proc = hostManager.getVirtualProc(runningFrame.getResourceId()); + msg = "Virtual proc " + proc.getProcId() + "is assigned to " + proc.getFrameId() + " not " + + runningFrame.getFrameId(); + } catch (Exception e) { + /* + * This will happen if the host goes offline and then comes back. In this case, we don't + * touch the frame since it might already be running somewhere else. We do however kill the + * proc. + */ + msg = "Virtual proc did not exist."; + } + + DispatchSupport.accountingErrors.incrementAndGet(); + if (proc != null && hostManager.isOprhan(proc)) { + dispatchSupport.clearVirtualProcAssignement(proc); + dispatchSupport.unbookProc(proc); + proc = null; + } + if (proc == null) { + // A frameCompleteReport might have been delivered before this report was + // processed + FrameDetail frameLatestVersion = jobManager.getFrameDetail(runningFrame.getFrameId()); + if (frameLatestVersion.state != FrameState.RUNNING) { + logger.info("DelayedVerification, the proc " + runningFrame.getResourceId() + " on host " + + report.getHost().getName() + " has already Completed " + runningFrame.getJobName() + + "/" + runningFrame.getFrameName()); + } else if (killFrame(runningFrame.getFrameId(), report.getHost().getName(), + KillCause.FrameVerificationFailure)) { + logger.info("FrameVerificationError, the proc " + runningFrame.getResourceId() + + " on host " + report.getHost().getName() + " was running for " + + (runtimeSeconds / 60.0f) + " minutes " + runningFrame.getJobName() + "/" + + runningFrame.getFrameName() + " but the DB did not " + "reflect this. 
" + msg); + } else { + logger.warn("FrameStuckWarning: frameId=" + runningFrame.getFrameId() + " render_node=" + + report.getHost().getName() + " - " + runningFrame.getJobName() + "/" + + runningFrame.getFrameName()); } - return runningFrames; + } } + return runningFrames; + } - public HostManager getHostManager() { - return hostManager; - } + public HostManager getHostManager() { + return hostManager; + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } - public BookingQueue getBookingQueue() { - return bookingQueue; - } + public BookingQueue getBookingQueue() { + return bookingQueue; + } - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } - public ThreadPoolExecutor getReportQueue() { - return reportQueue; - } + public ThreadPoolExecutor getReportQueue() { + return reportQueue; + } - public void setReportQueue(ThreadPoolExecutor reportQueue) { - this.reportQueue = reportQueue; - } + public void setReportQueue(ThreadPoolExecutor reportQueue) { + this.reportQueue = reportQueue; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public Dispatcher getDispatcher() { - return dispatcher; - } + public Dispatcher getDispatcher() { + return dispatcher; + } - public void setDispatcher(Dispatcher dispatcher) { - this.dispatcher = dispatcher; - } + public void setDispatcher(Dispatcher dispatcher) { + this.dispatcher = dispatcher; + } - public RqdClient getRqdClient() { - return rqdClient; - } + public RqdClient getRqdClient() { + return rqdClient; + } - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } - public JobManager getJobManager() { - return jobManager; - } + public JobManager getJobManager() { + return jobManager; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } - public JobDao getJobDao() { - return jobDao; - } + public JobDao getJobDao() { + return jobDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public LayerDao getLayerDao() { - return layerDao; - } + public LayerDao getLayerDao() { + return layerDao; + } - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } - public BookingManager getBookingManager() { - return bookingManager; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } + public Dispatcher 
getLocalDispatcher() { + return localDispatcher; + } - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } - public ThreadPoolExecutor getKillQueue() { - return killQueue; - } + public ThreadPoolExecutor getKillQueue() { + return killQueue; + } - public void setKillQueue(ThreadPoolExecutor killQueue) { - this.killQueue = killQueue; - } + public void setKillQueue(ThreadPoolExecutor killQueue) { + this.killQueue = killQueue; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java index 6326086ae..7875c459b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import java.lang.ref.WeakReference; @@ -36,109 +32,109 @@ public class HostReportQueue extends ThreadPoolExecutor { - private static final Logger logger = LogManager.getLogger(HostReportQueue.class); - private QueueRejectCounter rejectCounter = new QueueRejectCounter(); - private AtomicBoolean isShutdown = new AtomicBoolean(false); - private int queueCapacity; - - private Cache hostMap = CacheBuilder.newBuilder() - .expireAfterWrite(1, TimeUnit.HOURS) - .build(); - - /** - * Wrapper around protobuf object HostReport to add reportTi - */ - private class HostReportWrapper{ - private final HostReport hostReport; - private final WeakReference reportTaskRef; - public long taskTime = System.currentTimeMillis(); - - public HostReportWrapper(HostReport hostReport, DispatchHandleHostReport reportTask) { - this.hostReport = hostReport; - this.reportTaskRef = new WeakReference<>(reportTask); - } - - public HostReport getHostReport() { - return hostReport; - } - - public DispatchHandleHostReport getReportTask() { - return reportTaskRef.get(); - } - - public long getTaskTime() { - return taskTime; - } + private static final Logger logger = LogManager.getLogger(HostReportQueue.class); + private QueueRejectCounter rejectCounter = new QueueRejectCounter(); + private AtomicBoolean isShutdown = new AtomicBoolean(false); + private int queueCapacity; + + private Cache hostMap = + CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.HOURS).build(); + + /** + * Wrapper around protobuf object HostReport to add reportTi + */ + private class HostReportWrapper { + private final HostReport hostReport; + private final WeakReference reportTaskRef; + public long taskTime = System.currentTimeMillis(); + + public HostReportWrapper(HostReport hostReport, DispatchHandleHostReport reportTask) { + this.hostReport = hostReport; + this.reportTaskRef = new WeakReference<>(reportTask); } - public HostReportQueue(int threadPoolSizeInitial, int threadPoolSizeMax, int queueSize) { - super(threadPoolSizeInitial, threadPoolSizeMax, 10 , TimeUnit.SECONDS, - new LinkedBlockingQueue(queueSize)); - this.setRejectedExecutionHandler(rejectCounter); + public HostReport getHostReport() { + return hostReport; } - public void execute(DispatchHandleHostReport newReport) { - if (isShutdown.get()) { - return; - } - HostReportWrapper oldWrappedReport = hostMap.getIfPresent(newReport.getKey()); - // If hostReport exists on the cache and there's also a task waiting to be executed - // replace the old report by the new on, but refrain from creating another task - if (oldWrappedReport != null) { - DispatchHandleHostReport oldReport = oldWrappedReport.getReportTask(); - if(oldReport != null) { - // Replace report, but keep the reference of the existing task - hostMap.put(newReport.getKey(), - new HostReportWrapper(newReport.getHostReport(), oldReport)); - return; - } - } - hostMap.put(newReport.getKey(), - new HostReportWrapper(newReport.getHostReport(), newReport)); - super.execute(newReport); + public DispatchHandleHostReport getReportTask() { + return reportTaskRef.get(); } - public HostReport removePendingHostReport(String key) { - if (key != null) { - HostReportWrapper r = hostMap.getIfPresent(key); - if (r != null) { - hostMap.asMap().remove(key, r); - return r.getHostReport(); - } - } - return null; + public long getTaskTime() { + return taskTime; } + } - public long getRejectedTaskCount() { - return rejectCounter.getRejectCount(); - } + public HostReportQueue(int threadPoolSizeInitial, int 
threadPoolSizeMax, int queueSize) { + super(threadPoolSizeInitial, threadPoolSizeMax, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue(queueSize)); + this.setRejectedExecutionHandler(rejectCounter); + } - public int getQueueCapacity() { - return queueCapacity; + public void execute(DispatchHandleHostReport newReport) { + if (isShutdown.get()) { + return; } - - public void shutdown() { - if (!isShutdown.getAndSet(true)) { - logger.info("Shutting down report pool, currently " + this.getActiveCount() + " active threads."); - - final long startTime = System.currentTimeMillis(); - while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { - try { - logger.info("report pool is waiting for " + this.getQueue().size() + " more units to complete"); - if (System.currentTimeMillis() - startTime > 10000) { - throw new InterruptedException("report thread pool failed to shutdown properly"); - } - Thread.sleep(250); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } - } + HostReportWrapper oldWrappedReport = hostMap.getIfPresent(newReport.getKey()); + // If hostReport exists on the cache and there's also a task waiting to be + // executed + // replace the old report by the new on, but refrain from creating another task + if (oldWrappedReport != null) { + DispatchHandleHostReport oldReport = oldWrappedReport.getReportTask(); + if (oldReport != null) { + // Replace report, but keep the reference of the existing task + hostMap.put(newReport.getKey(), + new HostReportWrapper(newReport.getHostReport(), oldReport)); + return; + } + } + hostMap.put(newReport.getKey(), new HostReportWrapper(newReport.getHostReport(), newReport)); + super.execute(newReport); + } + + public HostReport removePendingHostReport(String key) { + if (key != null) { + HostReportWrapper r = hostMap.getIfPresent(key); + if (r != null) { + hostMap.asMap().remove(key, r); + return r.getHostReport(); + } + } + return null; + } + + public long getRejectedTaskCount() { + return rejectCounter.getRejectCount(); + } + + public int getQueueCapacity() { + return queueCapacity; + } + + public void shutdown() { + if (!isShutdown.getAndSet(true)) { + logger.info( + "Shutting down report pool, currently " + this.getActiveCount() + " active threads."); + + final long startTime = System.currentTimeMillis(); + while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { + try { + logger.info( + "report pool is waiting for " + this.getQueue().size() + " more units to complete"); + if (System.currentTimeMillis() - startTime > 10000) { + throw new InterruptedException("report thread pool failed to shutdown properly"); + } + Thread.sleep(250); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + break; } + } } + } - public boolean isHealthy() { - return getQueue().remainingCapacity() > 0; - } + public boolean isHealthy() { + return getQueue().remainingCapacity() > 0; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java index ce617f804..897324666 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
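HostReportQueue.execute() above coalesces reports: at most one pending report is kept per host, and when a fresher report arrives while an older one is still queued, only the payload is swapped so no second task is scheduled. A simplified sketch of that idea; it drops the Guava cache expiry and the WeakReference to the queued task that the real class uses, and the types are stand-ins.

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    public class ReportCoalescingSketch {
        record Report(String host, String payload) {}

        private final ConcurrentMap<String, Report> pending = new ConcurrentHashMap<>();

        /** Returns true if a new worker task should be scheduled for this report. */
        boolean submit(Report report) {
            // replace() only succeeds when something is already pending for the host,
            // in which case the existing task will simply pick up the fresher payload.
            if (pending.replace(report.host(), report) != null) {
                return false;
            }
            pending.put(report.host(), report);
            return true;
        }

        /** Called by the worker task when it runs: take and clear the pending payload. */
        Report take(String host) {
            return pending.remove(host);
        }
    }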
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class JobLookupException extends SpcueRuntimeException { - public JobLookupException() { - // TODO Auto-generated constructor stub - } + public JobLookupException() { + // TODO Auto-generated constructor stub + } - public JobLookupException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public JobLookupException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public JobLookupException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public JobLookupException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public JobLookupException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public JobLookupException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java index 9c3754f69..46d0b9e30 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; import java.util.ArrayList; @@ -44,408 +40,372 @@ public class LocalDispatcher extends AbstractDispatcher implements Dispatcher { - @Autowired - private Environment env; - - private static final Logger logger = - LogManager.getLogger(LocalDispatcher.class); - - private BookingManager bookingManager; - private JobManager jobManager; - private HostManager hostManager; - - private static final int MAX_QUERY_FRAMES = 10; - private static final int MAX_DISPATCHED_FRAMES = 10; - - @Override - public List dispatchHostToAllShows(DispatchHost host) { - return new ArrayList(); + @Autowired + private Environment env; + + private static final Logger logger = LogManager.getLogger(LocalDispatcher.class); + + private BookingManager bookingManager; + private JobManager jobManager; + private HostManager hostManager; + + private static final int MAX_QUERY_FRAMES = 10; + private static final int MAX_DISPATCHED_FRAMES = 10; + + @Override + public List dispatchHostToAllShows(DispatchHost host) { + return new ArrayList(); + } + + @Override + public List dispatchHost(DispatchHost host) { + + List lhas = bookingManager.getLocalHostAssignment(host); + host.isLocalDispatch = true; + + ArrayList procs = new ArrayList(); + for (LocalHostAssignment lha : lhas) { + prepHost(host, lha); + switch (lha.getType()) { + case JOB_PARTITION: + procs.addAll(dispatchHost(host, jobManager.getJob(lha.getJobId()), lha)); + break; + case LAYER_PARTITION: + procs.addAll(dispatchHost(host, jobManager.getLayerDetail(lha.getLayerId()), lha)); + break; + case FRAME_PARTITION: + procs.addAll(dispatchHost(host, jobManager.getFrame(lha.getFrameId()), lha)); + break; + default: + logger.warn("Error, invalid render " + "partition type: " + lha.getType()); + } } - @Override - public List dispatchHost(DispatchHost host) { - - List lhas = - bookingManager.getLocalHostAssignment(host); - host.isLocalDispatch = true; - - ArrayList procs = new ArrayList(); - for (LocalHostAssignment lha : lhas) { - prepHost(host, lha); - switch(lha.getType()) { - case JOB_PARTITION: - procs.addAll(dispatchHost(host, jobManager.getJob( - lha.getJobId()), lha)); - break; - case LAYER_PARTITION: - procs.addAll(dispatchHost(host, jobManager.getLayerDetail( - lha.getLayerId()), lha)); - break; - case FRAME_PARTITION: - procs.addAll(dispatchHost(host, jobManager.getFrame( - lha.getFrameId()), lha)); - break; - default: - logger.warn("Error, invalid render " + - "partition type: " + lha.getType()); - } - } + return procs; + } - return procs; - } + private List dispatchHost(DispatchHost host, JobInterface job, + LocalHostAssignment lha) { - private List dispatchHost(DispatchHost host, JobInterface job, - LocalHostAssignment lha) { - - List procs = new ArrayList(MAX_DISPATCHED_FRAMES); + List procs = new ArrayList(MAX_DISPATCHED_FRAMES); + /* + * Grab a list of frames to dispatch. + */ + List frames = + dispatchSupport.findNextDispatchFrames(job, host, MAX_QUERY_FRAMES); + + logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " + + host.idleCores + "/" + host.idleMemory + " on job " + job.getName()); + + for (DispatchFrame frame : frames) { + + /* + * Check if we have enough memory/cores for this frame, if not move on. + */ + if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.getMinMemory(), frame.minGpus, + frame.minGpuMemory)) { + continue; + } + + /* + * Build our virtual proc. 
+ */ + VirtualProc proc = VirtualProc.build(host, frame, lha); + + /* + * Double check the job has pending frames. + */ + if (!dispatchSupport.hasPendingFrames(job)) { + break; + } + + /* + * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns false. + * If the dispatch fails in a way that we should stop dispatching immediately (the host is + * down), a DispatcherException is thrown. + */ + if (dispatchHost(frame, proc)) { + + procs.add(proc); + + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); /* - * Grab a list of frames to dispatch. + * This should stay here and not go into VirtualProc or else the count will be off if you + * fail to book. */ - List frames = dispatchSupport.findNextDispatchFrames(job, - host, MAX_QUERY_FRAMES); - - logger.info("Frames found: " + frames.size() + " for host " + - host.getName() + " " + host.idleCores + "/" + host.idleMemory + - " on job " + job.getName()); - - for (DispatchFrame frame: frames) { - - /* - * Check if we have enough memory/cores for this frame, if - * not move on. - */ - if (!lha.hasAdditionalResources(lha.getThreads() * 100, - frame.getMinMemory(), - frame.minGpus, - frame.minGpuMemory)) { - continue; - } - - /* - * Build our virtual proc. - */ - VirtualProc proc = VirtualProc.build(host, frame, lha); - - /* - * Double check the job has pending frames. - */ - if (!dispatchSupport.hasPendingFrames(job)) { - break; - } - - /* - * Dispatch the frame. If a frame is booked, dispatchHost returns true, - * else if returns false. If the dispatch fails in a way - * that we should stop dispatching immediately (the host is down), - * a DispatcherException is thrown. - */ - if (dispatchHost(frame, proc)) { - - procs.add(proc); - - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - long memGpuReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_min", - Long.class); - /* - * This should stay here and not go into VirtualProc - * or else the count will be off if you fail to book. - */ - lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved); - if (!lha.hasAdditionalResources(lha.getThreads() * 100, - memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, - memGpuReservedMin)) { - break; - } - - if (procs.size() >= MAX_DISPATCHED_FRAMES) { - break; - } - } + lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved); + if (!lha.hasAdditionalResources(lha.getThreads() * 100, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + break; } - if (procs.size() == 0) { - bookingManager.removeInactiveLocalHostAssignment(lha); + if (procs.size() >= MAX_DISPATCHED_FRAMES) { + break; } - - return procs; - } - - @Override - public List dispatchHost(DispatchHost host, JobInterface job) { - /* - * Load up the local assignment. If one doesn't exist, that means - * the user has removed it and no booking action should be taken. - */ - LocalHostAssignment lha = bookingManager.getLocalHostAssignment(host.getHostId(), - job.getJobId()); - prepHost(host, lha); - - return dispatchHost(host, job, lha); + } } - private List dispatchHost(DispatchHost host, LayerInterface layer, - LocalHostAssignment lha) { - - List procs = new ArrayList(MAX_DISPATCHED_FRAMES); - /* - * Grab a list of frames to dispatch. 
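The hasAdditionalResources() gates in the dispatch loops above pass lha.getThreads() * 100 for the core check, reflecting Cuebot's convention of tracking CPU capacity in core units rather than whole cores. The 100-units-per-core constant below is inferred from that multiplication for illustration, not taken from a named Cuebot API.

    public class CoreUnitsExample {
        private static final int UNITS_PER_CORE = 100;  // assumption drawn from the code above

        public static void main(String[] args) {
            int threads = 4;
            int requestedUnits = threads * UNITS_PER_CORE;   // 400 core units
            int idleUnits = 250;                              // e.g. 2.5 idle cores
            // Mirrors the hasAdditionalResources() gate: skip the frame if the local
            // assignment cannot cover the requested units.
            System.out.println(requestedUnits <= idleUnits);  // false -> frame is skipped
        }
    }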
- */ - List frames = dispatchSupport.findNextDispatchFrames( - layer, host, MAX_QUERY_FRAMES); - - logger.info("Frames found: " + frames.size() + " for host " + - host.getName() + " " + host.idleCores + "/" + host.idleMemory + - " on layer " + layer); - - for (DispatchFrame frame: frames) { - - /* - * Check if we have enough memory/cores for this frame, if - * not move on. - */ - if (!lha.hasAdditionalResources(lha.getThreads() * 100, - frame.getMinMemory(), - frame.minGpus, - frame.minGpuMemory)) { - continue; - } - - /* - * Create our virtual proc. - */ - VirtualProc proc = VirtualProc.build(host, frame, lha); - - /* - * Double check if the layer we're booking has pending frames. - */ - if (!dispatchSupport.hasPendingFrames(layer)) { - break; - } - - /* - * Dispatch the frame. If a frame is booked, dispatchHost returns true, - * else if returns false. If the dispatch fails in a way - * that we should stop dispatching immediately (the host is down), - * a DispatcherException is thrown. - */ - if (dispatchHost(frame, proc)) { - - procs.add(proc); - - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - long memGpuReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_min", - Long.class); - - /* - * This should stay here and not go into VirtualProc - * or else the count will be off if you fail to book. - */ - lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved); - if (!lha.hasAdditionalResources(100, - memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, - memGpuReservedMin)) { - break; - } - - if (procs.size() >= MAX_DISPATCHED_FRAMES) { - break; - } - } - } - - if (procs.size() == 0) { - bookingManager.removeInactiveLocalHostAssignment(lha); - } - - return procs; + if (procs.size() == 0) { + bookingManager.removeInactiveLocalHostAssignment(lha); } - @Override - public List dispatchHost(DispatchHost host, LayerInterface layer) { + return procs; + } - /* - * Load up the local assignment. If one doesn't exist, that means - * the user has removed it and no booking action should be taken. - */ - - LocalHostAssignment lha = bookingManager.getLocalHostAssignment(host.getHostId(), - layer.getJobId()); - prepHost(host, lha); - - return dispatchHost(host, layer, lha); - } - - private List dispatchHost(DispatchHost host, FrameInterface frame, - LocalHostAssignment lha) { + @Override + public List dispatchHost(DispatchHost host, JobInterface job) { + /* + * Load up the local assignment. If one doesn't exist, that means the user has removed it and no + * booking action should be taken. + */ + LocalHostAssignment lha = + bookingManager.getLocalHostAssignment(host.getHostId(), job.getJobId()); + prepHost(host, lha); - List procs = new ArrayList(1); + return dispatchHost(host, job, lha); + } - /* - * Grab a dispatch frame record for the frame we want to dispatch. - */ - DispatchFrame dframe = jobManager.getDispatchFrame(frame.getId()); - if (!lha.hasAdditionalResources(lha.getMaxCoreUnits(), - dframe.getMinMemory(), - lha.getMaxGpuUnits(), - dframe.minGpuMemory)) { - return procs; - } + private List dispatchHost(DispatchHost host, LayerInterface layer, + LocalHostAssignment lha) { - VirtualProc proc = VirtualProc.build(host, dframe, lha); + List procs = new ArrayList(MAX_DISPATCHED_FRAMES); + /* + * Grab a list of frames to dispatch. 
+ */ + List frames = + dispatchSupport.findNextDispatchFrames(layer, host, MAX_QUERY_FRAMES); + + logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " + + host.idleCores + "/" + host.idleMemory + " on layer " + layer); + + for (DispatchFrame frame : frames) { + + /* + * Check if we have enough memory/cores for this frame, if not move on. + */ + if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.getMinMemory(), frame.minGpus, + frame.minGpuMemory)) { + continue; + } + + /* + * Create our virtual proc. + */ + VirtualProc proc = VirtualProc.build(host, frame, lha); + + /* + * Double check if the layer we're booking has pending frames. + */ + if (!dispatchSupport.hasPendingFrames(layer)) { + break; + } + + /* + * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns false. + * If the dispatch fails in a way that we should stop dispatching immediately (the host is + * down), a DispatcherException is thrown. + */ + if (dispatchHost(frame, proc)) { + + procs.add(proc); + + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); /* - * Dispatch the frame. If a frame is booked, dispatchHost returns true, - * else if returns false. If the dispatch fails in a way - * that we should stop dispatching immediately (the host is down), - * a DispatcherException is thrown. + * This should stay here and not go into VirtualProc or else the count will be off if you + * fail to book. */ - if (dispatchHost(dframe, proc)) { - procs.add(proc); + lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved); + if (!lha.hasAdditionalResources(100, memReservedMin, Dispatcher.GPU_UNITS_RESERVED_MIN, + memGpuReservedMin)) { + break; } - if (procs.size() == 0) { - bookingManager.removeInactiveLocalHostAssignment(lha); + if (procs.size() >= MAX_DISPATCHED_FRAMES) { + break; } + } + } - return procs; + if (procs.size() == 0) { + bookingManager.removeInactiveLocalHostAssignment(lha); } - public List dispatchHost(DispatchHost host, FrameInterface frame) { - /* - * Load up the local assignment. If one doesn't exist, that means - * the user has removed it and no booking action should be taken. - */ + return procs; + } - LocalHostAssignment lha = bookingManager.getLocalHostAssignment(host.getHostId(), - frame.getJobId()); - prepHost(host, lha); + @Override + public List dispatchHost(DispatchHost host, LayerInterface layer) { - return dispatchHost(host, frame, lha); - } + /* + * Load up the local assignment. If one doesn't exist, that means the user has removed it and no + * booking action should be taken. 
+ */
- @Override
- public void dispatchProcToJob(VirtualProc proc, JobInterface job) {
+ LocalHostAssignment lha =
+ bookingManager.getLocalHostAssignment(host.getHostId(), layer.getJobId());
+ prepHost(host, lha);
- LocalHostAssignment lha = null;
- proc.isLocalDispatch = true;
+ return dispatchHost(host, layer, lha);
+ }
- try {
- lha = bookingManager.getLocalHostAssignment(proc.getHostId(),
- job.getJobId());
- } catch (EmptyResultDataAccessException e) {
- logger.warn("Unable to find local host assignment for " + proc);
- dispatchSupport.unbookProc(proc);
- return;
- }
+ private List dispatchHost(DispatchHost host, FrameInterface frame,
+ LocalHostAssignment lha) {
- List frames = null;
- switch(lha.getType()) {
- case JOB_PARTITION:
- frames = dispatchSupport.findNextDispatchFrames(job,
- proc, MAX_QUERY_FRAMES);
- if (frames.size() == 0) {
- dispatchSupport.unbookProc(proc);
- dispatchHost(hostManager.getDispatchHost(proc.getHostId()), job);
- return;
- }
+ List procs = new ArrayList(1);
- break;
+ /*
+ * Grab a dispatch frame record for the frame we want to dispatch.
+ */
+ DispatchFrame dframe = jobManager.getDispatchFrame(frame.getId());
+ if (!lha.hasAdditionalResources(lha.getMaxCoreUnits(), dframe.getMinMemory(),
+ lha.getMaxGpuUnits(), dframe.minGpuMemory)) {
+ return procs;
+ }
- case LAYER_PARTITION:
- frames = dispatchSupport.findNextDispatchFrames(
- jobManager.getLayer(proc.getLayerId()),
- proc, MAX_QUERY_FRAMES);
- break;
+ VirtualProc proc = VirtualProc.build(host, dframe, lha);
- case FRAME_PARTITION:
+ /*
+ * Dispatch the frame. If a frame is booked, dispatchHost returns true, else it returns false.
+ * If the dispatch fails in a way that we should stop dispatching immediately (the host is
+ * down), a DispatcherException is thrown.
+ */
+ if (dispatchHost(dframe, proc)) {
+ procs.add(proc);
+ }
- DispatchFrame dispatchFrame =
- jobManager.getDispatchFrame(lha.getFrameId());
- frames = new ArrayList(1);
+ if (procs.size() == 0) {
+ bookingManager.removeInactiveLocalHostAssignment(lha);
+ }
- if (dispatchFrame.state.equals(FrameState.WAITING)) {
- frames.add(dispatchFrame);
- }
- break;
+ return procs;
+ }
- default:
- throw new DispatcherException(
- "Invalid local host assignment: " + lha.getType());
+ public List dispatchHost(DispatchHost host, FrameInterface frame) {
+ /*
+ * Load up the local assignment. If one doesn't exist, that means the user has removed it and no
+ * booking action should be taken. 
+ */ - } + LocalHostAssignment lha = + bookingManager.getLocalHostAssignment(host.getHostId(), frame.getJobId()); + prepHost(host, lha); - logger.info("Frames found: " + frames.size() + " for host " + - proc + " " + proc.coresReserved + "/" + proc.memoryReserved + - " on job " + job.getName()); + return dispatchHost(host, frame, lha); + } - for (DispatchFrame frame: frames) { - if (dispatchProc(frame, proc)) { - return; - } - } + @Override + public void dispatchProcToJob(VirtualProc proc, JobInterface job) { - dispatchSupport.unbookProc(proc); - } + LocalHostAssignment lha = null; + proc.isLocalDispatch = true; - /** - * Copy the local host assignment into the DispatchHost - * - * @param host - * @param lha - */ - private void prepHost(DispatchHost host, LocalHostAssignment lha) { - host.isLocalDispatch = true; - host.idleCores = lha.getIdleCoreUnits(); - host.idleMemory = lha.getIdleMemory(); - host.idleGpus = lha.getIdleGpuUnits(); - host.idleGpuMemory = lha.getIdleGpuMemory(); + try { + lha = bookingManager.getLocalHostAssignment(proc.getHostId(), job.getJobId()); + } catch (EmptyResultDataAccessException e) { + logger.warn("Unable to find local host assignment for " + proc); + dispatchSupport.unbookProc(proc); + return; } + List frames = null; + switch (lha.getType()) { + case JOB_PARTITION: + frames = dispatchSupport.findNextDispatchFrames(job, proc, MAX_QUERY_FRAMES); + if (frames.size() == 0) { + dispatchSupport.unbookProc(proc); + dispatchHost(hostManager.getDispatchHost(proc.getHostId()), job); + return; + } - @Override - public List dispatchHost(DispatchHost host, ShowInterface show) { - throw new RuntimeException("not implemented"); - } + break; - @Override - public List dispatchHost(DispatchHost host, GroupInterface g) { - throw new RuntimeException("not implemented"); - } + case LAYER_PARTITION: + frames = dispatchSupport.findNextDispatchFrames(jobManager.getLayer(proc.getLayerId()), + proc, MAX_QUERY_FRAMES); + break; - public JobManager getJobManager() { - return jobManager; - } + case FRAME_PARTITION: + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(lha.getFrameId()); + frames = new ArrayList(1); - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + if (dispatchFrame.state.equals(FrameState.WAITING)) { + frames.add(dispatchFrame); + } + break; + default: + throw new DispatcherException("Invalid local host assignment: " + lha.getType()); - public BookingManager getBookingManager() { - return bookingManager; } + logger.info("Frames found: " + frames.size() + " for host " + proc + " " + proc.coresReserved + + "/" + proc.memoryReserved + " on job " + job.getName()); - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; + for (DispatchFrame frame : frames) { + if (dispatchProc(frame, proc)) { + return; + } } - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + dispatchSupport.unbookProc(proc); + } + + /** + * Copy the local host assignment into the DispatchHost + * + * @param host + * @param lha + */ + private void prepHost(DispatchHost host, LocalHostAssignment lha) { + host.isLocalDispatch = true; + host.idleCores = lha.getIdleCoreUnits(); + host.idleMemory = lha.getIdleMemory(); + host.idleGpus = lha.getIdleGpuUnits(); + host.idleGpuMemory = lha.getIdleGpuMemory(); + } + + @Override + public List dispatchHost(DispatchHost host, ShowInterface show) { + throw new 
RuntimeException("not implemented"); + } + + @Override + public List dispatchHost(DispatchHost host, GroupInterface g) { + throw new RuntimeException("not implemented"); + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public BookingManager getBookingManager() { + return bookingManager; + } + + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java index a2df5dede..ecb7f4c88 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java @@ -1,7 +1,7 @@ package com.imageworks.spcue.dispatcher; public interface QueueHealthCheck { - boolean isHealthy(); + boolean isHealthy(); - void shutdownUnhealthy(); + void shutdownUnhealthy(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java index bb3d00716..6b3757c01 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import java.util.concurrent.RejectedExecutionHandler; @@ -25,20 +21,19 @@ public class QueueRejectCounter implements RejectedExecutionHandler { - private AtomicLong rejectCounter = new AtomicLong(0); + private AtomicLong rejectCounter = new AtomicLong(0); - @Override - public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { - rejectCounter.getAndIncrement(); - } + @Override + public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { + rejectCounter.getAndIncrement(); + } - public long getRejectCount() { - return rejectCounter.get(); - } + public long getRejectCount() { + return rejectCounter.get(); + } - public void clear() { - rejectCounter.set(0); - } + public void clear() { + rejectCounter.set(0); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java index ff0a34def..5a7b9b8c7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.dispatcher; import java.util.ArrayList; @@ -47,404 +44,374 @@ import com.imageworks.spcue.util.CueExceptionUtil; import com.imageworks.spcue.util.SqlUtil; - public class RedirectManager { - private static final Logger logger = LogManager.getLogger(RedirectManager.class); - - private JobDao jobDao; - private ProcDao procDao; - private GroupDao groupDao; - private Dispatcher dispatcher; - private BookingQueue bookingQueue; - private HostManager hostManager; - private JobManagerSupport jobManagerSupport; - private DispatchSupport dispatchSupport; - private RedirectService redirectService; - private ProcSearchFactory procSearchFactory; - private Environment env; - - @Autowired - public RedirectManager(RedirectService redirectService, Environment env) { - this.env = env; - this.redirectService = redirectService; + private static final Logger logger = LogManager.getLogger(RedirectManager.class); + + private JobDao jobDao; + private ProcDao procDao; + private GroupDao groupDao; + private Dispatcher dispatcher; + private BookingQueue bookingQueue; + private HostManager hostManager; + private JobManagerSupport jobManagerSupport; + private DispatchSupport dispatchSupport; + private RedirectService redirectService; + private ProcSearchFactory procSearchFactory; + private Environment env; + + @Autowired + public RedirectManager(RedirectService redirectService, Environment env) { + this.env = env; + this.redirectService = redirectService; + } + + /** + * Delete all redirects that are past expiration age. + * + * @return count of redirects deleted + */ + public int deleteExpired() { + return redirectService.deleteExpired(); + } + + /** + * Remove a redirect for a specific proc. + * + * @param proc + */ + public boolean removeRedirect(ProcInterface proc) { + procDao.setRedirectTarget(proc, null); + return redirectService.remove(proc.getProcId()) != null; + } + + /** + * Return true if a redirect for a specific Proc exists. False if it does not. + * + * @param proc + * @return + */ + public boolean hasRedirect(ProcInterface proc) { + return redirectService.containsKey(proc.getProcId()); + } + + /** + * Redirects procs found by the ProcSearchCriteria to the specified group. + * + * @param criteria + * @param group + * @param kill + * @param source + * @return + */ + public List addRedirect(ProcSearchCriteria criteria, GroupInterface group, + boolean kill, Source source) { + + List groups = new ArrayList(1); + groups.add(group); + + ProcSearchInterface search = procSearchFactory.create(criteria); + search.sortByBookedTime(); + search.notGroups(groups); + + List procs = hostManager.findBookedVirtualProcs(search); + if (procs.size() == 0) { + return procs; } - /** - * Delete all redirects that are past expiration age. - * - * @return count of redirects deleted - */ - public int deleteExpired() { - return redirectService.deleteExpired(); - } + for (VirtualProc proc : procs) { + logger.info("Adding redirect from " + proc + " to group " + group.getName()); - /** - * Remove a redirect for a specific proc. - * - * @param proc - */ - public boolean removeRedirect(ProcInterface proc) { - procDao.setRedirectTarget(proc, null); - return redirectService.remove(proc.getProcId()) != null; + Redirect r = new Redirect(group); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + } else { + procs.remove(proc); + } } - /** - * Return true if a redirect for a specific Proc - * exists. False if it does not. 
- * - * @param proc - * @return - */ - public boolean hasRedirect(ProcInterface proc) { - return redirectService.containsKey(proc.getProcId()); + if (kill) { + jobManagerSupport.kill(procs, source); } - /** - * Redirects procs found by the ProcSearchCriteria - * to the specified group. - * - * @param criteria - * @param group - * @param kill - * @param source - * @return - */ - public List addRedirect(ProcSearchCriteria criteria, - GroupInterface group, boolean kill, Source source) { - - List groups = new ArrayList(1); - groups.add(group); - - ProcSearchInterface search = procSearchFactory.create(criteria); - search.sortByBookedTime(); - search.notGroups(groups); - - List procs = hostManager.findBookedVirtualProcs(search); - if (procs.size() == 0) { - return procs; - } - - for (VirtualProc proc: procs) { - logger.info("Adding redirect from " + proc + " to group " - + group.getName()); - - Redirect r = new Redirect(group); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - } - else { - procs.remove(proc); - } - } - - if (kill) { - jobManagerSupport.kill(procs, source); - } - - return procs; + return procs; + } + + /** + * Redirects procs found by the proc search criteria to an array of jobs. + * + * @param criteria + * @param jobs + * @param kill + * @param source + * @return + */ + public List addRedirect(ProcSearchCriteria criteria, List jobs, + boolean kill, Source source) { + int index = 0; + + ProcSearchInterface procSearch = procSearchFactory.create(criteria); + procSearch.notJobs(jobs); + List procs = hostManager.findBookedVirtualProcs(procSearch); + if (procs.size() == 0) { + return procs; } - /** - * Redirects procs found by the proc search criteria to - * an array of jobs. - * - * @param criteria - * @param jobs - * @param kill - * @param source - * @return - */ - public List addRedirect(ProcSearchCriteria criteria, - List jobs, boolean kill, Source source) { - int index = 0; - - ProcSearchInterface procSearch = procSearchFactory.create(criteria); - procSearch.notJobs(jobs); - List procs = hostManager.findBookedVirtualProcs(procSearch); - if (procs.size() == 0) { - return procs; - } - - for (VirtualProc proc: procs) { - if (index >= jobs.size()) { - index = 0; - } - - logger.info("Adding redirect from " + proc + " to job " - + jobs.get(index).getName()); - - Redirect r = new Redirect(jobs.get(index)); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - index++; - } - else { - procs.remove(proc); - } - } - if (kill) { - jobManagerSupport.kill(procs, source); - } - - return procs; - } - /** - * Redirect a list of procs to the specified job. Using - * redirect counters, the redirect only happens one - * all procs have reported in. This gives users the - * ability to kill multiple frames and open up large - * amounts of memory and cores. - * - * @param procs - * @param job - * @param source - * @return true if the redirect succeeds. - */ - public boolean addRedirect(List procs, JobInterface job, - Source source) { - - String redirectGroupId = SqlUtil.genKeyRandom(); - - for (VirtualProc proc: procs) { - Redirect r = new Redirect(redirectGroupId, job); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - } - } - - for (VirtualProc proc: procs) { - jobManagerSupport.kill(proc, source); - } - - return true; - } + for (VirtualProc proc : procs) { + if (index >= jobs.size()) { + index = 0; + } - /** - * Redirect a proc to the specified job. 
- * - * @param proc - * @param job - * @param kill - * @param source - * @return true if the redirect succeeds. - */ - public boolean addRedirect(VirtualProc proc, JobInterface job, - boolean kill, Source source) { - - if (dispatchSupport.findNextDispatchFrames( - job, proc, 1).size() < 1) { - return false; - } - - Redirect r = new Redirect(job); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - if (kill) { - jobManagerSupport.kill(proc, source); - } - return true; - } + logger.info("Adding redirect from " + proc + " to job " + jobs.get(index).getName()); - return false; + Redirect r = new Redirect(jobs.get(index)); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + index++; + } else { + procs.remove(proc); + } } - - /** - * Redirect a proc to the specified group. - * - * @param proc - * @param group - * @param kill - * @param source - * @return true if the redirect succeeds. - */ - public boolean addRedirect(VirtualProc proc, GroupInterface group, - boolean kill, Source source) { - - // Test a dispatch - DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); - host.idleCores = proc.coresReserved; - host.idleMemory = proc.memoryReserved; - host.idleGpus = proc.gpusReserved; - host.idleGpuMemory = proc.gpuMemoryReserved; - - if (dispatchSupport.findDispatchJobs(host, group).size() < 1) { - logger.info("Failed to find a pending job in group: " + group.getName()); - return false; - } - - Redirect r = new Redirect(group); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - if (kill) { - jobManagerSupport.kill(proc, source); - } - return true; - } - - return false; + if (kill) { + jobManagerSupport.kill(procs, source); } - /** - * Redirect the specified proc to its redirect - * destination; - * - * @param proc - * @return - */ - public boolean redirect(VirtualProc proc) { - - try { - - Redirect r = redirectService.remove(proc.getProcId()); - if (r == null) { - logger.info("Failed to find redirect for proc " + proc); - return false; - } - - int other_redirects_with_same_group = - redirectService.countRedirectsWithGroup(r.getGroupId()); - - if (other_redirects_with_same_group > 0) { - logger.warn("Redirect waiting on " + other_redirects_with_same_group + " more frames."); - return false; - } - - /* - * The proc must be unbooked before its resources can be - * redirected. - */ - dispatchSupport.unbookProc(proc, "is being redirected"); - - /* - * Set the free cores and memory to the exact amount - * on the proc we just unbooked so we don't stomp on - * other redirects. 
- */ - DispatchHost host = hostManager.getDispatchHost( - proc.getHostId()); - - switch (r.getType()) { - - case JOB_REDIRECT: - logger.info("attempting a job redirect to " + - r.getDestinationId()); - JobInterface job = jobDao.getJob(r.getDestinationId()); - logger.info("redirecting proc " + proc - + " to job " + job.getName()); - - if (dispatcher.isTestMode()) { - dispatcher.dispatchHost(host, job); - } - else { - bookingQueue.execute(new - DispatchBookHost(host, job, dispatcher, env)); - } - return true; - - case GROUP_REDIRECT: - logger.info("attempting a group redirect to " + - r.getDestinationId()); - GroupInterface group = groupDao.getGroup(r.getDestinationId()); - logger.info("redirecting group " + proc + - " to job " + group.getName()); - - if (dispatcher.isTestMode()) { - dispatcher.dispatchHost(host, group); - } - else { - bookingQueue.execute(new DispatchBookHost(host, - group, dispatcher, env)); - } - return true; - - default: - logger.info("redirect failed, invalid redirect type: " - + r.getType()); - return false; - } - - } - catch (Exception e) { - /* - * If anything fails the redirect fails, so just - * return false after logging. - */ - CueExceptionUtil.logStackTrace("redirect failed", e); - return false; - } + return procs; + } + + /** + * Redirect a list of procs to the specified job. Using redirect counters, the redirect only + * happens one all procs have reported in. This gives users the ability to kill multiple frames + * and open up large amounts of memory and cores. + * + * @param procs + * @param job + * @param source + * @return true if the redirect succeeds. + */ + public boolean addRedirect(List procs, JobInterface job, Source source) { + + String redirectGroupId = SqlUtil.genKeyRandom(); + + for (VirtualProc proc : procs) { + Redirect r = new Redirect(redirectGroupId, job); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + } } - public JobDao getJobDao() { - return jobDao; + for (VirtualProc proc : procs) { + jobManagerSupport.kill(proc, source); } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; + return true; + } + + /** + * Redirect a proc to the specified job. + * + * @param proc + * @param job + * @param kill + * @param source + * @return true if the redirect succeeds. + */ + public boolean addRedirect(VirtualProc proc, JobInterface job, boolean kill, Source source) { + + if (dispatchSupport.findNextDispatchFrames(job, proc, 1).size() < 1) { + return false; } - public GroupDao getGroupDao() { - return groupDao; + Redirect r = new Redirect(job); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + if (kill) { + jobManagerSupport.kill(proc, source); + } + return true; } - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; + return false; + } + + /** + * Redirect a proc to the specified group. + * + * @param proc + * @param group + * @param kill + * @param source + * @return true if the redirect succeeds. 
+ */ + public boolean addRedirect(VirtualProc proc, GroupInterface group, boolean kill, Source source) { + + // Test a dispatch + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + host.idleCores = proc.coresReserved; + host.idleMemory = proc.memoryReserved; + host.idleGpus = proc.gpusReserved; + host.idleGpuMemory = proc.gpuMemoryReserved; + + if (dispatchSupport.findDispatchJobs(host, group).size() < 1) { + logger.info("Failed to find a pending job in group: " + group.getName()); + return false; } - public Dispatcher getDispatcher() { - return dispatcher; + Redirect r = new Redirect(group); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + if (kill) { + jobManagerSupport.kill(proc, source); + } + return true; } - public void setDispatcher(Dispatcher dispatcher) { - this.dispatcher = dispatcher; - } + return false; + } - public BookingQueue getBookingQueue() { - return bookingQueue; - } + /** + * Redirect the specified proc to its redirect destination; + * + * @param proc + * @return + */ + public boolean redirect(VirtualProc proc) { - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } + try { - public HostManager getHostManager() { - return hostManager; - } + Redirect r = redirectService.remove(proc.getProcId()); + if (r == null) { + logger.info("Failed to find redirect for proc " + proc); + return false; + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + int other_redirects_with_same_group = redirectService.countRedirectsWithGroup(r.getGroupId()); - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; + if (other_redirects_with_same_group > 0) { + logger.warn("Redirect waiting on " + other_redirects_with_same_group + " more frames."); + return false; + } + + /* + * The proc must be unbooked before its resources can be redirected. + */ + dispatchSupport.unbookProc(proc, "is being redirected"); + + /* + * Set the free cores and memory to the exact amount on the proc we just unbooked so we don't + * stomp on other redirects. + */ + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + + switch (r.getType()) { + + case JOB_REDIRECT: + logger.info("attempting a job redirect to " + r.getDestinationId()); + JobInterface job = jobDao.getJob(r.getDestinationId()); + logger.info("redirecting proc " + proc + " to job " + job.getName()); + + if (dispatcher.isTestMode()) { + dispatcher.dispatchHost(host, job); + } else { + bookingQueue.execute(new DispatchBookHost(host, job, dispatcher, env)); + } + return true; + + case GROUP_REDIRECT: + logger.info("attempting a group redirect to " + r.getDestinationId()); + GroupInterface group = groupDao.getGroup(r.getDestinationId()); + logger.info("redirecting group " + proc + " to job " + group.getName()); + + if (dispatcher.isTestMode()) { + dispatcher.dispatchHost(host, group); + } else { + bookingQueue.execute(new DispatchBookHost(host, group, dispatcher, env)); + } + return true; + + default: + logger.info("redirect failed, invalid redirect type: " + r.getType()); + return false; + } + + } catch (Exception e) { + /* + * If anything fails the redirect fails, so just return false after logging. 
+ */ + CueExceptionUtil.logStackTrace("redirect failed", e); + return false; } + } - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } + public JobDao getJobDao() { + return jobDao; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public GroupDao getGroupDao() { + return groupDao; + } - public ProcDao getProcDao() { - return procDao; - } + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + public Dispatcher getDispatcher() { + return dispatcher; + } - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } + public void setDispatcher(Dispatcher dispatcher) { + this.dispatcher = dispatcher; + } - public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; - } -} + public BookingQueue getBookingQueue() { + return bookingQueue; + } + + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public ProcSearchFactory getProcSearchFactory() { + return procSearchFactory; + } + + public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { + this.procSearchFactory = procSearchFactory; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java index 0d1141bc1..879dad3fb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java @@ -2,46 +2,41 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher; public interface ResourceContainer { - /** - * Return true if the container can handle the given resource amounts. False - * if not. - * - * @param minCores - * @param minMemory - * @param minGpus - * @param minGpuMemory - * @return - */ - public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, long minGpuMemory); - - /** - * Subtract the given resources from the grand totals. - * - * @param coreUnits - * @param memory - * @param gpuUnits - * @param gpuMemory - */ - public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory); + /** + * Return true if the container can handle the given resource amounts. False if not. + * + * @param minCores + * @param minMemory + * @param minGpus + * @param minGpuMemory + * @return + */ + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, + long minGpuMemory); + + /** + * Subtract the given resources from the grand totals. + * + * @param coreUnits + * @param memory + * @param gpuUnits + * @param gpuMemory + */ + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java index 1907b75ae..b83f3f1df 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/
-
-
 package com.imageworks.spcue.dispatcher;
 
 import com.imageworks.spcue.SpcueRuntimeException;
@@ -24,24 +20,23 @@
 @SuppressWarnings("serial")
 public class ResourceCreationFailureException extends SpcueRuntimeException {
 
- public ResourceCreationFailureException() {
- // TODO Auto-generated constructor stub
- }
+ public ResourceCreationFailureException() {
+ // TODO Auto-generated constructor stub
+ }
 
- public ResourceCreationFailureException(String message, Throwable cause) {
- super(message, cause);
- // TODO Auto-generated constructor stub
- }
+ public ResourceCreationFailureException(String message, Throwable cause) {
+ super(message, cause);
+ // TODO Auto-generated constructor stub
+ }
 
- public ResourceCreationFailureException(String message) {
- super(message);
- // TODO Auto-generated constructor stub
- }
+ public ResourceCreationFailureException(String message) {
+ super(message);
+ // TODO Auto-generated constructor stub
+ }
 
- public ResourceCreationFailureException(Throwable cause) {
- super(cause);
- // TODO Auto-generated constructor stub
- }
+ public ResourceCreationFailureException(Throwable cause) {
+ super(cause);
+ // TODO Auto-generated constructor stub
+ }
 }
-
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java
index ec80912e9..4b502f021 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java
@@ -2,50 +2,44 @@
 /*
 * Copyright Contributors to the OpenCue Project
 *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
 *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
 */
-
-
 package com.imageworks.spcue.dispatcher;
 
 import com.imageworks.spcue.SpcueRuntimeException;
 
 /**
- * Thrown when a resource is created for an enity which already
- * has a resource assigned to it.
+ * Thrown when a resource is created for an entity which already has a resource assigned to it. 
*/ @SuppressWarnings("serial") public class ResourceDuplicationFailureException extends SpcueRuntimeException { - public ResourceDuplicationFailureException() { - // TODO Auto-generated constructor stub - } + public ResourceDuplicationFailureException() { + // TODO Auto-generated constructor stub + } - public ResourceDuplicationFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public ResourceDuplicationFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public ResourceDuplicationFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public ResourceDuplicationFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public ResourceDuplicationFailureException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public ResourceDuplicationFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java index f3bad7ff3..aa2cb5212 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class ResourceReleaseFailureException extends SpcueRuntimeException { - public ResourceReleaseFailureException() { - // TODO Auto-generated constructor stub - } + public ResourceReleaseFailureException() { + // TODO Auto-generated constructor stub + } - public ResourceReleaseFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public ResourceReleaseFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public ResourceReleaseFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public ResourceReleaseFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public ResourceReleaseFailureException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public ResourceReleaseFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java index 03bfa8fc9..37f0fdf21 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class ResourceReservationFailureException extends SpcueRuntimeException { - public ResourceReservationFailureException() { - // TODO Auto-generated constructor stub - } + public ResourceReservationFailureException() { + // TODO Auto-generated constructor stub + } - public ResourceReservationFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public ResourceReservationFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public ResourceReservationFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public ResourceReservationFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public ResourceReservationFailureException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public ResourceReservationFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java index 5677026e8..d8e3992bb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class RqdRetryReportException extends SpcueRuntimeException { - public RqdRetryReportException() { - // TODO Auto-generated constructor stub - } + public RqdRetryReportException() { + // TODO Auto-generated constructor stub + } - public RqdRetryReportException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public RqdRetryReportException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public RqdRetryReportException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public RqdRetryReportException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public RqdRetryReportException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public RqdRetryReportException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java index 77b414dcb..58cf4c568 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import org.springframework.beans.factory.annotation.Autowired; @@ -35,103 +31,92 @@ */ public class DispatchBookHost extends KeyRunnable { - private Environment env; - private ShowInterface show = null; - private GroupInterface group = null; - private JobInterface job = null; - private DispatchHost host; - private Dispatcher dispatcher; - - public DispatchHost getDispatchHost() { - this.setKey(host.getId()); - return host; - } - - public DispatchBookHost(DispatchHost host, Dispatcher d, Environment env) { - super(host.getId()); - this.host = host; - this.dispatcher = d; - this.env = env; - } - - public DispatchBookHost(DispatchHost host, JobInterface job, Dispatcher d, Environment env) { - super(host.getId() + "_job_" + job.getJobId()); - this.host = host; - this.job = job; - this.dispatcher = d; - this.env = env; - } - - public DispatchBookHost(DispatchHost host, GroupInterface group, Dispatcher d, Environment env) { - super(host.getId() + "_group_" + group.getGroupId()); - this.host = host; - this.group = group; - this.dispatcher = d; - this.env = env; - } + private Environment env; + private ShowInterface show = null; + private GroupInterface group = null; + private JobInterface job = null; + private DispatchHost host; + private Dispatcher dispatcher; + + public DispatchHost getDispatchHost() { + this.setKey(host.getId()); + return host; + } + + public DispatchBookHost(DispatchHost host, Dispatcher d, Environment env) { + super(host.getId()); + this.host = host; + this.dispatcher = d; + this.env = env; + } + + public DispatchBookHost(DispatchHost host, JobInterface job, Dispatcher d, Environment env) { + super(host.getId() + "_job_" + job.getJobId()); + this.host = host; + this.job = job; + this.dispatcher = d; + this.env = env; + } + + public DispatchBookHost(DispatchHost host, GroupInterface group, Dispatcher d, Environment env) { + super(host.getId() + "_group_" + group.getGroupId()); + this.host = host; + this.group = group; + this.dispatcher = d; + this.env = env; + } + + public DispatchBookHost(DispatchHost host, ShowInterface show, Dispatcher d, Environment env) { + super(host.getId() + "_name_" + show.getName()); + this.host = host; + this.show = show; + this.dispatcher = d; + this.env = env; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (show != null) { + dispatcher.dispatchHost(host, show); + } else if (group != null) { + dispatcher.dispatchHost(host, group); + } else if (job != null) { + dispatcher.dispatchHost(host, job); + } + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + + // Try to book any remaining resources + if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + dispatcher.dispatchHost(host); + } - public DispatchBookHost(DispatchHost host, ShowInterface show, Dispatcher d, Environment env) { - super(host.getId() + "_name_" + show.getName()); - this.host = host; - this.show = show; - this.dispatcher = d; - this.env = env; + if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + dispatcher.dispatchHostToAllShows(host); + } + } + }.execute(); + } + + @Override + public int hashCode() { + return host.name.hashCode(); + 
}; + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (show != null) { - dispatcher.dispatchHost(host, show); - } - else if (group != null) { - dispatcher.dispatchHost(host, group); - } - else if (job != null) { - dispatcher.dispatchHost(host, job); - } - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - long memGpuReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_min", - Long.class); - - // Try to book any remaining resources - if (host.hasAdditionalResources( - Dispatcher.CORE_POINTS_RESERVED_MIN, - memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, - memGpuReservedMin)) { - dispatcher.dispatchHost(host); - } - - if (host.hasAdditionalResources( - Dispatcher.CORE_POINTS_RESERVED_MIN, - memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, - memGpuReservedMin)) { - dispatcher.dispatchHostToAllShows(host); - } - } - }.execute(); + if (this.getClass() != other.getClass()) { + return false; } - - @Override - public int hashCode() { - return host.name.hashCode(); - }; - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; - } - if (this.getClass() != other.getClass()) { - return false; - } - DispatchBookHost that = (DispatchBookHost) other; - return that.host.name.equals(host.name); - }; + DispatchBookHost that = (DispatchBookHost) other; + return that.host.name.equals(host.name); + }; } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java index 737541a08..00ec6979c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.DispatchHost; @@ -24,23 +20,22 @@ public class DispatchBookHostLocal extends KeyRunnable { - private DispatchHost host; - private Dispatcher dispatcher; + private DispatchHost host; + private Dispatcher dispatcher; - public DispatchBookHostLocal(DispatchHost host, Dispatcher d) { - super(host.getId()); + public DispatchBookHostLocal(DispatchHost host, Dispatcher d) { + super(host.getId()); - this.host = host; - this.dispatcher = d; - } + this.host = host; + this.dispatcher = d; + } - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - dispatcher.dispatchHost(host); - } - }.execute(); - } + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + dispatcher.dispatchHost(host); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java index d345af959..b0406348e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java @@ -2,42 +2,36 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.util.CueExceptionUtil; /** - * A template that wraps the code within the run() method of each dispatch - * command. + * A template that wraps the code within the run() method of each dispatch command. 
* * @category command */ public abstract class DispatchCommandTemplate { - public abstract void wrapDispatchCommand(); + public abstract void wrapDispatchCommand(); - public void execute() { - try { - wrapDispatchCommand(); - } catch (java.lang.Throwable t) { - CueExceptionUtil.logStackTrace("Dispatch command template " - + this.getClass().toString() + " caught error ", t); - } + public void execute() { + try { + wrapDispatchCommand(); + } catch (java.lang.Throwable t) { + CueExceptionUtil.logStackTrace( + "Dispatch command template " + this.getClass().toString() + " caught error ", t); } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java index cf6428f3a..3e9b698b4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; - import com.imageworks.spcue.FrameInterface; import com.imageworks.spcue.JobInterface; import com.imageworks.spcue.LayerInterface; @@ -34,55 +29,54 @@ */ public class DispatchDropDepends extends KeyRunnable { - JobInterface job; - LayerInterface layer; - FrameInterface frame; + JobInterface job; + LayerInterface layer; + FrameInterface frame; - DependTarget target; - DependManager dependManager; + DependTarget target; + DependManager dependManager; - public DispatchDropDepends(JobInterface job, DependTarget target, DependManager dependManager) { - super("disp_drop_dep_job_" + job.getJobId() + "_" + target.toString()); - this.job = job; - this.target = target; - this.dependManager = dependManager; - } + public DispatchDropDepends(JobInterface job, DependTarget target, DependManager dependManager) { + super("disp_drop_dep_job_" + job.getJobId() + "_" + target.toString()); + this.job = job; + this.target = target; + this.dependManager = dependManager; + } - public DispatchDropDepends(LayerInterface layer, DependTarget target, DependManager dependManager) { - super("disp_drop_dep_layer_" + layer.getLayerId() + "_" + target.toString()); - this.layer = layer; - this.target = target; - this.dependManager = dependManager; - } + public DispatchDropDepends(LayerInterface layer, DependTarget target, + DependManager dependManager) { + super("disp_drop_dep_layer_" + layer.getLayerId() + "_" + target.toString()); + this.layer = layer; + this.target = target; + this.dependManager = dependManager; + } - public DispatchDropDepends(FrameInterface frame, DependTarget target, DependManager dependManager) { - super("disp_drop_dep_frame_" + frame.getFrameId() + "_" + target.toString()); - this.frame = frame; - this.target = target; - this.dependManager = dependManager; - } + public DispatchDropDepends(FrameInterface frame, DependTarget target, + DependManager dependManager) { + super("disp_drop_dep_frame_" + frame.getFrameId() + "_" + target.toString()); + this.frame = frame; + this.target = target; + this.dependManager = dependManager; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (job!=null) { - for (LightweightDependency d: dependManager.getWhatThisDependsOn(job, target)) { - dependManager.satisfyDepend(d); - } - } - else if (layer != null) { - for (LightweightDependency d: dependManager.getWhatThisDependsOn(layer, target)) { - dependManager.satisfyDepend(d); - } - } - else if (frame != null) { - for (LightweightDependency d: dependManager.getWhatThisDependsOn(frame, target)) { - dependManager.satisfyDepend(d); - } - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (job != null) { + for (LightweightDependency d : dependManager.getWhatThisDependsOn(job, target)) { + dependManager.satisfyDepend(d); + } + } else if (layer != null) { + for (LightweightDependency d : dependManager.getWhatThisDependsOn(layer, target)) { + dependManager.satisfyDepend(d); + } + } else if (frame != null) { + for (LightweightDependency d : dependManager.getWhatThisDependsOn(frame, target)) { + dependManager.satisfyDepend(d); + } + } - } - }.execute(); - } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java index 32a2acf69..82108912f 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java @@ -2,28 +2,23 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.Source; import com.imageworks.spcue.dao.criteria.FrameSearchInterface; import com.imageworks.spcue.service.JobManagerSupport; - /** * A command for eating an array of frames * @@ -31,23 +26,23 @@ */ public class DispatchEatFrames extends KeyRunnable { - private FrameSearchInterface search; - private Source source; - private JobManagerSupport jobManagerSupport; - - public DispatchEatFrames(FrameSearchInterface search, Source source, JobManagerSupport jobManagerSupport) { - super("disp_eat_frames_job_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); - this.search = search; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.eatFrames(search, source); - } - }.execute(); - } + private FrameSearchInterface search; + private Source source; + private JobManagerSupport jobManagerSupport; + + public DispatchEatFrames(FrameSearchInterface search, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_eat_frames_job_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); + this.search = search; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.eatFrames(search, source); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java index 1a18f06ad..47c6553a6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.dispatcher.HostReportHandler; @@ -30,46 +26,41 @@ */ public class DispatchHandleHostReport extends KeyRunnable { - private HostReport hostReport; - private boolean isBootReport; - private HostReportHandler hostReportHandler; - public volatile int reportTime = (int) (System.currentTimeMillis() / 1000); + private HostReport hostReport; + private boolean isBootReport; + private HostReportHandler hostReportHandler; + public volatile int reportTime = (int) (System.currentTimeMillis() / 1000); - public DispatchHandleHostReport(HostReport report, HostReportHandler rqdReportManager) { - super("disp_handle_host_report_" + report.hashCode() + - "_" + rqdReportManager.hashCode()); - this.hostReport = report; - this.isBootReport = false; - this.hostReportHandler = rqdReportManager; - } + public DispatchHandleHostReport(HostReport report, HostReportHandler rqdReportManager) { + super("disp_handle_host_report_" + report.hashCode() + "_" + rqdReportManager.hashCode()); + this.hostReport = report; + this.isBootReport = false; + this.hostReportHandler = rqdReportManager; + } - public DispatchHandleHostReport(BootReport report, HostReportHandler rqdReportManager) { - super("disp_handle_host_report_" + report.hashCode() + - "_" + rqdReportManager.hashCode()); - HostReport hostReport = HostReport.newBuilder() - .setHost(report.getHost()) - .setCoreInfo(report.getCoreInfo()) - .build(); + public DispatchHandleHostReport(BootReport report, HostReportHandler rqdReportManager) { + super("disp_handle_host_report_" + report.hashCode() + "_" + rqdReportManager.hashCode()); + HostReport hostReport = + HostReport.newBuilder().setHost(report.getHost()).setCoreInfo(report.getCoreInfo()).build(); - this.hostReport = hostReport; - this.isBootReport = true; - this.hostReportHandler = rqdReportManager; - } + this.hostReport = hostReport; + this.isBootReport = true; + this.hostReportHandler = rqdReportManager; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - hostReportHandler.handleHostReport(hostReport, isBootReport); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + hostReportHandler.handleHostReport(hostReport, isBootReport); + } + }.execute(); + } - public synchronized void updateReportTime() { - reportTime = (int) (System.currentTimeMillis() / 1000); - } + public synchronized void updateReportTime() { + reportTime = (int) (System.currentTimeMillis() / 1000); + } - public 
HostReport getHostReport() { - return hostReport; - } + public HostReport getHostReport() { + return hostReport; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java index 1910321a8..e3f7e1ffb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.JobInterface; @@ -29,26 +25,26 @@ * @category command */ public class DispatchJobComplete extends KeyRunnable { - private JobInterface job; - private Source source; - private boolean isManualKill; - - private JobManagerSupport jobManagerSupport; - public DispatchJobComplete(JobInterface job, Source source, boolean isManualKill, - JobManagerSupport jobManagerSupport) { - super("disp_job_complete_" + job.getJobId() + "_" + source.toString()); - this.job = job; - this.source = source; - this.isManualKill = isManualKill; - this.jobManagerSupport = jobManagerSupport; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.shutdownJob(job, source, isManualKill); - } - }.execute(); - } + private JobInterface job; + private Source source; + private boolean isManualKill; + + private JobManagerSupport jobManagerSupport; + + public DispatchJobComplete(JobInterface job, Source source, boolean isManualKill, + JobManagerSupport jobManagerSupport) { + super("disp_job_complete_" + job.getJobId() + "_" + source.toString()); + this.job = job; + this.source = source; + this.isManualKill = isManualKill; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.shutdownJob(job, source, isManualKill); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java index 986d6bd05..2c2e97336 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.Source; @@ -30,23 +26,23 @@ */ public class DispatchKillFrames extends KeyRunnable { - private FrameSearchInterface search; - private JobManagerSupport jobManagerSupport; - private Source source; - - public DispatchKillFrames(FrameSearchInterface search, Source source, JobManagerSupport jobManagerSupport) { - super("disp_kill_frames_" + source.toString() + "_" + jobManagerSupport.hashCode()); - this.search = search; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.killProcs(search, source, true); - } - }.execute(); - } + private FrameSearchInterface search; + private JobManagerSupport jobManagerSupport; + private Source source; + + public DispatchKillFrames(FrameSearchInterface search, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_kill_frames_" + source.toString() + "_" + jobManagerSupport.hashCode()); + this.search = search; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.killProcs(search, source, true); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java index d97966139..6176bc200 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.Source; @@ -25,26 +21,26 @@ import java.util.Collection; public class DispatchKillProcs extends KeyRunnable { - private Collection procs; - private JobManagerSupport jobManagerSupport; - private Source source; - - public DispatchKillProcs(Collection procs, Source source, JobManagerSupport jobManagerSupport) { - super("disp_kill_procs_" + procs.hashCode() + "_" + source.toString() + - "_" + jobManagerSupport.hashCode()); - this.procs = procs; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - for (VirtualProc p: procs) { - jobManagerSupport.kill(p, source); - } - } - }.execute(); - } + private Collection procs; + private JobManagerSupport jobManagerSupport; + private Source source; + + public DispatchKillProcs(Collection procs, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_kill_procs_" + procs.hashCode() + "_" + source.toString() + "_" + + jobManagerSupport.hashCode()); + this.procs = procs; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + for (VirtualProc p : procs) { + jobManagerSupport.kill(p, source); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java index c3682866e..a88ce9b72 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.service.JobLauncher; @@ -27,21 +23,20 @@ */ public class DispatchLaunchJob extends KeyRunnable { - private JobLauncher jobLauncher; - private JobSpec spec; - - public DispatchLaunchJob(JobSpec spec, JobLauncher jobLauncher) { - super("disp_launch_job_" + spec.getShow() + "_" + spec.getShot() + "_" + spec.getUid()); - this.spec = spec; - this.jobLauncher = jobLauncher; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobLauncher.launch(spec); - } - }.execute(); - } + private JobLauncher jobLauncher; + private JobSpec spec; + + public DispatchLaunchJob(JobSpec spec, JobLauncher jobLauncher) { + super("disp_launch_job_" + spec.getShow() + "_" + spec.getShot() + "_" + spec.getUid()); + this.spec = spec; + this.jobLauncher = jobLauncher; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobLauncher.launch(spec); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java index 92ec0db29..262338b32 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import java.util.List; @@ -28,27 +24,26 @@ public class DispatchMoveJobs extends KeyRunnable { - private GroupDetail group; - private List jobs; - private GroupManager groupManager; - - public DispatchMoveJobs(GroupDetail group, List jobs, GroupManager groupManager) { - super("disp_move_jobs_" + group.getGroupId() + "_dept_" + group.getDepartmentId() + - "_show_" + group.getShowId()); - this.group = group; - this.jobs = jobs; - this.groupManager = groupManager; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - for (JobInterface job: jobs) { - groupManager.reparentJob(job, group, new Inherit[] { Inherit.All }); - } - } - }.execute(); - } + private GroupDetail group; + private List jobs; + private GroupManager groupManager; + + public DispatchMoveJobs(GroupDetail group, List jobs, GroupManager groupManager) { + super("disp_move_jobs_" + group.getGroupId() + "_dept_" + group.getDepartmentId() + "_show_" + + group.getShowId()); + this.group = group; + this.jobs = jobs; + this.groupManager = groupManager; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + for (JobInterface job : jobs) { + groupManager.reparentJob(job, group, new Inherit[] {Inherit.All}); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java index 7e12eb90c..c7920aba0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.DispatchJob; @@ -30,23 +26,22 @@ */ public class DispatchNextFrame extends KeyRunnable { - private VirtualProc proc; - private DispatchJob job; - private Dispatcher dispatcher; - - public DispatchNextFrame(DispatchJob j, VirtualProc p, Dispatcher d) { - super("disp_next_frame_" + j.getJobId() + "_" + p.getProcId()); - this.job = j; - this.proc = p; - this.dispatcher = d; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - dispatcher.dispatchProcToJob(proc, job); - } - }.execute(); - } + private VirtualProc proc; + private DispatchJob job; + private Dispatcher dispatcher; + + public DispatchNextFrame(DispatchJob j, VirtualProc p, Dispatcher d) { + super("disp_next_frame_" + j.getJobId() + "_" + p.getProcId()); + this.job = j; + this.proc = p; + this.dispatcher = d; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + dispatcher.dispatchProcToJob(proc, job); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java index 528474929..26cf95f9c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.JobInterface; @@ -27,44 +23,40 @@ public class DispatchReorderFrames extends KeyRunnable { - private JobInterface job = null; - private LayerInterface layer = null; - private FrameSet frameSet; - private Order order; - private JobManagerSupport jobManagerSupport; - - public DispatchReorderFrames(JobInterface job, FrameSet frameSet, Order order, - JobManagerSupport jobManagerSupport) { - super("disp_reorder_frames_job_" + job.getJobId() + - "_" + jobManagerSupport.toString()); - this.job = job; - this.frameSet = frameSet; - this.order = order; - this.jobManagerSupport = jobManagerSupport; - } - - public DispatchReorderFrames(LayerInterface layer, FrameSet frameSet, Order order, - JobManagerSupport jobManagerSupport) { - super("disp_reorder_frames_layer_" + layer.getLayerId() + - "_" + jobManagerSupport.toString()); - this.layer = layer; - this.frameSet = frameSet; - this.order = order; - this.jobManagerSupport = jobManagerSupport; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (job != null) { - jobManagerSupport.reorderJob(job, frameSet, order); - } - else if (layer != null) { - jobManagerSupport.reorderLayer(layer, frameSet, order); - } - } - }.execute(); - } + private JobInterface job = null; + private LayerInterface layer = null; + private FrameSet frameSet; + private Order order; + private JobManagerSupport jobManagerSupport; + + public DispatchReorderFrames(JobInterface job, FrameSet frameSet, Order order, + JobManagerSupport jobManagerSupport) { + super("disp_reorder_frames_job_" + job.getJobId() + "_" + jobManagerSupport.toString()); + this.job = job; + this.frameSet = frameSet; + this.order = order; + this.jobManagerSupport = jobManagerSupport; + } + + public DispatchReorderFrames(LayerInterface layer, FrameSet frameSet, Order order, + JobManagerSupport jobManagerSupport) { + super("disp_reorder_frames_layer_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); + this.layer = layer; + this.frameSet = frameSet; + this.order = order; + this.jobManagerSupport = jobManagerSupport; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (job != null) { + jobManagerSupport.reorderJob(job, frameSet, order); + } else if (layer != null) { + jobManagerSupport.reorderLayer(layer, frameSet, order); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java index 8546423dd..0de3a787e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.Source; @@ -30,24 +26,23 @@ */ public class DispatchRetryFrames extends KeyRunnable { - private FrameSearchInterface search; - private Source source; - private JobManagerSupport jobManagerSupport; - - - public DispatchRetryFrames(FrameSearchInterface search, Source source, JobManagerSupport jobManagerSupport) { - super("disp_retry_frames_" + search.hashCode() + "_" + source.toString()); - this.search = search; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.retryFrames(search, source); - } - }.execute(); - } + private FrameSearchInterface search; + private Source source; + private JobManagerSupport jobManagerSupport; + + public DispatchRetryFrames(FrameSearchInterface search, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_retry_frames_" + search.hashCode() + "_" + source.toString()); + this.search = search; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.retryFrames(search, source); + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java index fe9bde60e..11fba3e9b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import org.apache.logging.log4j.Logger; @@ -29,34 +25,33 @@ public class DispatchRqdKillFrame extends KeyRunnable { - private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrame.class); - - private String message; - private String hostname; - private String frameId; - - private final RqdClient rqdClient; - - public DispatchRqdKillFrame(String hostname, String frameId, String message, RqdClient rqdClient) { - super("disp_rqd_kill_frame_" + hostname + "_" + frameId + "_" + rqdClient.toString()); - this.hostname = hostname; - this.frameId = frameId; - this.message = message; - this.rqdClient = rqdClient; - } - - @Override - public void run() { - long startTime = System.currentTimeMillis(); - try { - rqdClient.killFrame(hostname, frameId, message); - } catch (RqdClientException e) { - logger.info("Failed to contact host " + hostname + ", " + e); - } finally { - long elapsedTime = System.currentTimeMillis() - startTime; - logger.info("RQD communication with " + hostname + - " took " + elapsedTime + "ms"); - } + private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrame.class); + + private String message; + private String hostname; + private String frameId; + + private final RqdClient rqdClient; + + public DispatchRqdKillFrame(String hostname, String frameId, String message, + RqdClient rqdClient) { + super("disp_rqd_kill_frame_" + hostname + "_" + frameId + "_" + rqdClient.toString()); + this.hostname = hostname; + this.frameId = frameId; + this.message = message; + this.rqdClient = rqdClient; + } + + @Override + public void run() { + long startTime = System.currentTimeMillis(); + try { + rqdClient.killFrame(hostname, frameId, message); + } catch (RqdClientException e) { + logger.info("Failed to contact host " + hostname + ", " + e); + } finally { + long elapsedTime = System.currentTimeMillis() - startTime; + logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java index 301f77479..f070692cb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.FrameInterface; @@ -30,49 +26,48 @@ /** * A runnable to communicate with rqd requesting for a frame to be killed due to memory issues. *

- * Before killing a frame, the database is updated to mark the frame status as EXIT_STATUS_MEMORY_FAILURE, - * this allows the FrameCompleteHandler to possibly retry the frame after increasing its memory requirements + * Before killing a frame, the database is updated to mark the frame status as + * EXIT_STATUS_MEMORY_FAILURE, this allows the FrameCompleteHandler to possibly retry the frame + * after increasing its memory requirements */ public class DispatchRqdKillFrameMemory extends KeyRunnable { - private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrameMemory.class); + private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrameMemory.class); - private String message; - private String hostname; - private DispatchSupport dispatchSupport; - private final RqdClient rqdClient; - private final boolean isTestMode; + private String message; + private String hostname; + private DispatchSupport dispatchSupport; + private final RqdClient rqdClient; + private final boolean isTestMode; - private FrameInterface frame; + private FrameInterface frame; - public DispatchRqdKillFrameMemory(String hostname, FrameInterface frame, String message, RqdClient rqdClient, - DispatchSupport dispatchSupport, boolean isTestMode) { - super("disp_rqd_kill_frame_" + frame.getFrameId() + "_" + rqdClient.toString()); - this.frame = frame; - this.hostname = hostname; - this.message = message; - this.rqdClient = rqdClient; - this.dispatchSupport = dispatchSupport; - this.isTestMode = isTestMode; - } + public DispatchRqdKillFrameMemory(String hostname, FrameInterface frame, String message, + RqdClient rqdClient, DispatchSupport dispatchSupport, boolean isTestMode) { + super("disp_rqd_kill_frame_" + frame.getFrameId() + "_" + rqdClient.toString()); + this.frame = frame; + this.hostname = hostname; + this.message = message; + this.rqdClient = rqdClient; + this.dispatchSupport = dispatchSupport; + this.isTestMode = isTestMode; + } - @Override - public void run() { - long startTime = System.currentTimeMillis(); - try { - if (dispatchSupport.updateFrameMemoryError(frame) && !isTestMode) { - rqdClient.killFrame(hostname, frame.getFrameId(), message); - } else { - logger.warn("Could not update frame " + frame.getFrameId() + - " status to EXIT_STATUS_MEMORY_FAILURE. Canceling kill request!"); - } - } catch (RqdClientException e) { - logger.warn("Failed to contact host " + hostname + ", " + e); - } finally { - long elapsedTime = System.currentTimeMillis() - startTime; - logger.info("RQD communication with " + hostname + - " took " + elapsedTime + "ms"); - } + @Override + public void run() { + long startTime = System.currentTimeMillis(); + try { + if (dispatchSupport.updateFrameMemoryError(frame) && !isTestMode) { + rqdClient.killFrame(hostname, frame.getFrameId(), message); + } else { + logger.warn("Could not update frame " + frame.getFrameId() + + " status to EXIT_STATUS_MEMORY_FAILURE. 
Canceling kill request!"); + } + } catch (RqdClientException e) { + logger.warn("Failed to contact host " + hostname + ", " + e); + } finally { + long elapsedTime = System.currentTimeMillis() - startTime; + logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java index 5294a203c..39bae5229 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.FrameInterface; @@ -32,50 +28,49 @@ */ public class DispatchSatisfyDepends extends KeyRunnable { - private JobInterface job = null; - private LayerInterface layer = null; - private FrameInterface frame = null; - private FrameSearchInterface search; - private JobManagerSupport jobManagerSupport; + private JobInterface job = null; + private LayerInterface layer = null; + private FrameInterface frame = null; + private FrameSearchInterface search; + private JobManagerSupport jobManagerSupport; - public DispatchSatisfyDepends(JobInterface job, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + job.getJobId() + "_" + jobManagerSupport.toString()); - this.job = job; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(JobInterface job, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + job.getJobId() + "_" + jobManagerSupport.toString()); + this.job = job; + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSatisfyDepends(LayerInterface layer, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); - this.layer = layer; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(LayerInterface layer, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); + this.layer = layer; + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSatisfyDepends(FrameInterface frame, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + frame.getFrameId() + "_" + jobManagerSupport.toString()); - this.frame = frame; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(FrameInterface frame, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + frame.getFrameId() + "_" + jobManagerSupport.toString()); + this.frame = frame; + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSatisfyDepends(FrameSearchInterface search, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); - this.search = search; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(FrameSearchInterface search, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); + this.search = search; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (search != null) { - jobManagerSupport.satisfyWhatDependsOn(search); - } else if (frame != null) { - jobManagerSupport.satisfyWhatDependsOn(frame); - } else if (layer != null) { - jobManagerSupport.satisfyWhatDependsOn(layer); - } else { - jobManagerSupport.satisfyWhatDependsOn(job); - } - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (search != null) { + jobManagerSupport.satisfyWhatDependsOn(search); + } else if (frame != null) { + jobManagerSupport.satisfyWhatDependsOn(frame); + } else if (layer != null) { + jobManagerSupport.satisfyWhatDependsOn(layer); + } else { + jobManagerSupport.satisfyWhatDependsOn(job); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java index b4eb11a07..a106644ec 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.JobInterface; @@ -24,29 +20,29 @@ import com.imageworks.spcue.service.JobManagerSupport; /** - * A command for shutting down a job if it is completed. - * This is a workaround for when Cuebot failed to shutdown a job due to database access error. + * A command for shutting down a job if it is completed. This is a workaround for when Cuebot failed + * to shutdown a job due to database access error. 
* * @category command */ public class DispatchShutdownJobIfCompleted extends KeyRunnable { - private JobInterface job; - - private JobManagerSupport jobManagerSupport; - public DispatchShutdownJobIfCompleted(JobInterface job, JobManagerSupport jobManagerSupport) { - super("disp_st_job_comp_" + job.getJobId()); - this.job = job; - this.jobManagerSupport = jobManagerSupport; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (jobManagerSupport.isJobComplete(job)) { - jobManagerSupport.shutdownJob(job, new Source("natural"), false); - } - } - }.execute(); - } + private JobInterface job; + + private JobManagerSupport jobManagerSupport; + + public DispatchShutdownJobIfCompleted(JobInterface job, JobManagerSupport jobManagerSupport) { + super("disp_st_job_comp_" + job.getJobId()); + this.job = job; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (jobManagerSupport.isJobComplete(job)) { + jobManagerSupport.shutdownJob(job, new Source("natural"), false); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java index b0430b892..1bad4f6fe 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.JobInterface; @@ -25,40 +21,40 @@ public class DispatchStaggerFrames extends KeyRunnable { - private JobInterface job = null; - private LayerInterface layer = null; - private String range; - private int stagger; - private JobManagerSupport jobManagerSupport; - - public DispatchStaggerFrames(JobInterface job, String range, int stagger, JobManagerSupport jobManagerSupport) { - super("disp_stag_frames_" + job.getJobId() + "_" + range); - this.job = job; - this.range = range; - this.stagger = stagger; - this.jobManagerSupport = jobManagerSupport; - } - - public DispatchStaggerFrames(LayerInterface layer, String range, int stagger, JobManagerSupport jobManagerSupport) { - super("disp_stag_frames_" + layer.getLayerId() + "_" + range); - this.layer = layer; - this.range = range; - this.stagger = stagger; - this.jobManagerSupport = jobManagerSupport; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (job != null) { - jobManagerSupport.staggerJob(job, range, stagger); - } - else if (layer != null) { - jobManagerSupport.staggerJob(layer, range, stagger); - } - } - }.execute(); - } + private JobInterface job = null; + private LayerInterface layer = null; + private String range; + private int stagger; + private JobManagerSupport jobManagerSupport; + + public DispatchStaggerFrames(JobInterface job, String range, int stagger, + JobManagerSupport jobManagerSupport) { + super("disp_stag_frames_" + job.getJobId() + "_" + range); + this.job = job; + this.range = range; + this.stagger = stagger; + this.jobManagerSupport = jobManagerSupport; + } + + public DispatchStaggerFrames(LayerInterface layer, String range, int stagger, + JobManagerSupport jobManagerSupport) { + super("disp_stag_frames_" + layer.getLayerId() + "_" + range); + this.layer = layer; + this.range = range; + this.stagger = stagger; + this.jobManagerSupport = jobManagerSupport; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (job != null) { + jobManagerSupport.staggerJob(job, range, stagger); + } else if (layer != null) { + jobManagerSupport.staggerJob(layer, range, stagger); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java index bdbdb87da..dfee490cf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.dispatcher.commands; import com.imageworks.spcue.DispatchHost; @@ -24,18 +20,17 @@ public abstract class KeyRunnable implements Runnable { - private String key; + private String key; - public KeyRunnable(String key) { - this.key = key; - } + public KeyRunnable(String key) { + this.key = key; + } - public String getKey() { - return key; - } + public String getKey() { + return key; + } - public void setKey(String key) { - this.key = key; - } + public void setKey(String key) { + this.key = key; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java index 15ab1384e..0bdda38ca 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.dispatcher.commands; import java.util.List; @@ -27,25 +23,25 @@ import com.imageworks.spcue.service.HostManager; public class ManageReparentHosts extends KeyRunnable { - AllocationInterface alloc; - List<HostInterface> hosts; - HostManager hostManager; - - public ManageReparentHosts(AllocationInterface alloc, List<HostInterface> hosts, HostManager hostManager) { - super(alloc.getAllocationId()); - this.alloc = alloc; - this.hosts = hosts; - this.hostManager = hostManager; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - for (HostInterface host : hosts) { - hostManager.setAllocation(host, alloc); - } - } - }.execute(); - } + AllocationInterface alloc; + List<HostInterface> hosts; + HostManager hostManager; + + public ManageReparentHosts(AllocationInterface alloc, List<HostInterface> hosts, + HostManager hostManager) { + super(alloc.getAllocationId()); + this.alloc = alloc; + this.hosts = hosts; + this.hostManager = hostManager; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + for (HostInterface host : hosts) { + hostManager.setAllocation(host, alloc); + } + } + }.execute(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java index 658289fd1..90522b3ff 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.rqd; @@ -25,79 +23,78 @@ public interface RqdClient { - /** - * Setting to true pretends all remote procedures execute perfectly. - * - * @param tests - */ - public void setTestMode(boolean tests); + /** + * Setting to true pretends all remote procedures execute perfectly. + * + * @param tests + */ + public void setTestMode(boolean tests); - /** - * Returns a RunningFrameInfo - * - * @param proc - * @return - */ - RunningFrameInfo getFrameStatus(VirtualProc proc); + /** + * Returns a RunningFrameInfo + * + * @param proc + * @return + */ + RunningFrameInfo getFrameStatus(VirtualProc proc); - /** - * Sets the host lock to the provided state. - * - * @param host - * @param lock - */ - public void setHostLock(HostInterface host, LockState lock); + /** + * Sets the host lock to the provided state.
+ * + * @param host + * @param lock + */ + public void setHostLock(HostInterface host, LockState lock); - /** - * Locks the host. - * - * @param host - */ - public void lockHost(HostInterface host); + /** + * Locks the host. + * + * @param host + */ + public void lockHost(HostInterface host); - /** - * Unlocks the host. - * - * @param host - */ - public void unlockHost(HostInterface host); + /** + * Unlocks the host. + * + * @param host + */ + public void unlockHost(HostInterface host); - /** - * Reboots the host now. - * - * @param host - */ - public void rebootNow(HostInterface host); + /** + * Reboots the host now. + * + * @param host + */ + public void rebootNow(HostInterface host); - /** - * Reboots the host when idle - * - * @param host - */ - public void rebootWhenIdle(HostInterface host); + /** + * Reboots the host when idle. + * + * @param host + */ + public void rebootWhenIdle(HostInterface host); - /** - * Attempts to launch a frame - * - * @param frame - * @param resource - * @return RunningFramePrx - */ - void launchFrame(RunFrame frame, VirtualProc proc); + /** + * Attempts to launch a frame. + * + * @param frame + * @param proc + */ + void launchFrame(RunFrame frame, VirtualProc proc); - /** - * Kills a running frame by resource - * - * @param resource - */ - void killFrame(VirtualProc Proc, String message); + /** + * Kills the frame running on the given proc. + * + * @param proc + * @param message + */ + void killFrame(VirtualProc proc, String message); - /** - * Kills a running frame - * - * @param hostName - * @param frameId - */ - void killFrame(String hostName, String frameId, String message); + /** + * Kills a running frame. + * + * @param hostName + * @param frameId + * @param message + */ + void killFrame(String hostName, String frameId, String message); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java index a987a67bd..4609b94e3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License.
*/ - - package com.imageworks.spcue.rqd; import com.imageworks.spcue.SpcueRuntimeException; @@ -24,24 +20,23 @@ @SuppressWarnings("serial") public class RqdClientException extends SpcueRuntimeException { - public RqdClientException() { - // TODO Auto-generated constructor stub - } + public RqdClientException() { + // TODO Auto-generated constructor stub + } - public RqdClientException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public RqdClientException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public RqdClientException(String message) { - super(message); - // TODO Auto-generated constructor stub - } + public RqdClientException(String message) { + super(message); + // TODO Auto-generated constructor stub + } - public RqdClientException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public RqdClientException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java index 40554904b..27aad081c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.rqd; @@ -50,187 +48,171 @@ import com.imageworks.spcue.grpc.rqd.RunningFrameStatusResponse; public final class RqdClientGrpc implements RqdClient { - private static final Logger logger = LogManager.getLogger(RqdClientGrpc.class); - - private final int rqdCacheSize; - private final int rqdCacheExpiration; - private final int rqdCacheConcurrency; - private final int rqdServerPort; - private final int rqdTaskDeadlineSeconds; - private LoadingCache channelCache; - - private boolean testMode = false; - - - public RqdClientGrpc(int rqdServerPort, int rqdCacheSize, int rqdCacheExpiration, - int rqdCacheConcurrency, int rqdTaskDeadline) { - this.rqdServerPort = rqdServerPort; - this.rqdCacheSize = rqdCacheSize; - this.rqdCacheExpiration = rqdCacheExpiration; - this.rqdCacheConcurrency = rqdCacheConcurrency; - this.rqdTaskDeadlineSeconds = rqdTaskDeadline; + private static final Logger logger = LogManager.getLogger(RqdClientGrpc.class); + + private final int rqdCacheSize; + private final int rqdCacheExpiration; + private final int rqdCacheConcurrency; + private final int rqdServerPort; + private final int rqdTaskDeadlineSeconds; + private LoadingCache channelCache; + + private boolean testMode = false; + + public RqdClientGrpc(int rqdServerPort, int rqdCacheSize, int rqdCacheExpiration, + int rqdCacheConcurrency, int rqdTaskDeadline) { + this.rqdServerPort = rqdServerPort; + this.rqdCacheSize = rqdCacheSize; + this.rqdCacheExpiration = rqdCacheExpiration; + this.rqdCacheConcurrency = rqdCacheConcurrency; + this.rqdTaskDeadlineSeconds = rqdTaskDeadline; + } + + private void buildChannelCache() { + this.channelCache = + CacheBuilder.newBuilder().maximumSize(rqdCacheSize).concurrencyLevel(rqdCacheConcurrency) + .expireAfterAccess(rqdCacheExpiration, TimeUnit.MINUTES) + .removalListener(new RemovalListener() { + @Override + public void onRemoval(RemovalNotification removal) { + ManagedChannel conn = removal.getValue(); + conn.shutdown(); + } + }).build(new CacheLoader() { + @Override + public ManagedChannel load(String host) throws Exception { + ManagedChannelBuilder channelBuilder = + ManagedChannelBuilder.forAddress(host, rqdServerPort).usePlaintext(); + return channelBuilder.build(); + } + }); + } + + private RqdInterfaceGrpc.RqdInterfaceBlockingStub getStub(String host) throws ExecutionException { + if (channelCache == null) { + buildChannelCache(); } - - private void buildChannelCache() { - this.channelCache = CacheBuilder.newBuilder() - .maximumSize(rqdCacheSize) - .concurrencyLevel(rqdCacheConcurrency) - .expireAfterAccess(rqdCacheExpiration, TimeUnit.MINUTES) - .removalListener(new RemovalListener() { - @Override - public void onRemoval(RemovalNotification removal){ - ManagedChannel conn = removal.getValue(); - conn.shutdown(); - } - }) - .build( - new CacheLoader() { - @Override - public ManagedChannel load(String host) throws Exception { - ManagedChannelBuilder channelBuilder = ManagedChannelBuilder - .forAddress(host, rqdServerPort) - .usePlaintext(); - return channelBuilder.build(); - } - }); + ManagedChannel channel = channelCache.get(host); + return RqdInterfaceGrpc.newBlockingStub(channel).withDeadlineAfter(rqdTaskDeadlineSeconds, + TimeUnit.SECONDS); + } + + private RunningFrameGrpc.RunningFrameBlockingStub getRunningFrameStub(String host) + throws ExecutionException { + if (channelCache == null) { + buildChannelCache(); } - - private RqdInterfaceGrpc.RqdInterfaceBlockingStub getStub(String host) throws ExecutionException { - if (channelCache == null) { - 
buildChannelCache(); - } - ManagedChannel channel = channelCache.get(host); - return RqdInterfaceGrpc - .newBlockingStub(channel) - .withDeadlineAfter(rqdTaskDeadlineSeconds, TimeUnit.SECONDS); + ManagedChannel channel = channelCache.get(host); + return RunningFrameGrpc.newBlockingStub(channel).withDeadlineAfter(rqdTaskDeadlineSeconds, + TimeUnit.SECONDS); + } + + public void setHostLock(HostInterface host, LockState lock) { + if (lock == LockState.OPEN) { + logger.debug("Unlocking RQD host"); + unlockHost(host); + } else if (lock == LockState.LOCKED) { + logger.debug("Locking RQD host"); + lockHost(host); + } else { + logger.debug("Unknown LockState passed to setHostLock."); } + } - private RunningFrameGrpc.RunningFrameBlockingStub getRunningFrameStub(String host) throws ExecutionException { - if (channelCache == null) { - buildChannelCache(); - } - ManagedChannel channel = channelCache.get(host); - return RunningFrameGrpc - .newBlockingStub(channel) - .withDeadlineAfter(rqdTaskDeadlineSeconds, TimeUnit.SECONDS); - } + public void lockHost(HostInterface host) { + RqdStaticLockAllRequest request = RqdStaticLockAllRequest.newBuilder().build(); - public void setHostLock(HostInterface host, LockState lock) { - if (lock == LockState.OPEN) { - logger.debug("Unlocking RQD host"); - unlockHost(host); - } else if (lock == LockState.LOCKED) { - logger.debug("Locking RQD host"); - lockHost(host); - } else { - logger.debug("Unknown LockState passed to setHostLock."); - } + try { + getStub(host.getName()).lockAll(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to lock host: " + host.getName(), e); } + } - public void lockHost(HostInterface host) { - RqdStaticLockAllRequest request = RqdStaticLockAllRequest.newBuilder().build(); + public void unlockHost(HostInterface host) { + RqdStaticUnlockAllRequest request = RqdStaticUnlockAllRequest.newBuilder().build(); - try { - getStub(host.getName()).lockAll(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to lock host: " + host.getName(), e); - } + try { + getStub(host.getName()).unlockAll(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to unlock host: " + host.getName(), e); } + } - public void unlockHost(HostInterface host) { - RqdStaticUnlockAllRequest request = RqdStaticUnlockAllRequest.newBuilder().build(); + public void rebootNow(HostInterface host) { + RqdStaticRebootNowRequest request = RqdStaticRebootNowRequest.newBuilder().build(); - try { - getStub(host.getName()).unlockAll(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to unlock host: " + host.getName(), e); - } + try { + getStub(host.getName()).rebootNow(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to reboot host: " + host.getName(), e); } + } - public void rebootNow(HostInterface host) { - RqdStaticRebootNowRequest request = RqdStaticRebootNowRequest.newBuilder().build(); + public void rebootWhenIdle(HostInterface host) { + RqdStaticRebootIdleRequest request = RqdStaticRebootIdleRequest.newBuilder().build(); - try { - getStub(host.getName()).rebootNow(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to reboot host: " + host.getName(), e); - } + if (testMode) { + return; } - public void rebootWhenIdle(HostInterface host) { - 
RqdStaticRebootIdleRequest request = RqdStaticRebootIdleRequest.newBuilder().build(); + try { + getStub(host.getName()).rebootIdle(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to reboot host: " + host.getName(), e); + } + } - if (testMode) { - return; - } + public void killFrame(VirtualProc proc, String message) { + killFrame(proc.hostName, proc.frameId, message); + } - try { - getStub(host.getName()).rebootIdle(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to reboot host: " + host.getName(), e); - } - } + public void killFrame(String host, String frameId, String message) { + RqdStaticKillRunningFrameRequest request = RqdStaticKillRunningFrameRequest.newBuilder() + .setFrameId(frameId).setMessage(message).build(); - public void killFrame(VirtualProc proc, String message) { - killFrame(proc.hostName, proc.frameId, message); + if (testMode) { + return; } - public void killFrame(String host, String frameId, String message) { - RqdStaticKillRunningFrameRequest request = - RqdStaticKillRunningFrameRequest.newBuilder() - .setFrameId(frameId) - .setMessage(message) - .build(); - - if (testMode) { - return; - } - - try { - logger.info("killing frame on " + host + ", source: " + message); - getStub(host).killRunningFrame(request); - } catch(StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to kill frame " + frameId, e); - } + try { + logger.info("killing frame on " + host + ", source: " + message); + getStub(host).killRunningFrame(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to kill frame " + frameId, e); } - - public RunningFrameInfo getFrameStatus(VirtualProc proc) { - try { - RqdStaticGetRunFrameResponse getRunFrameResponse = - getStub(proc.hostName) - .getRunFrame( - RqdStaticGetRunFrameRequest.newBuilder() - .setFrameId(proc.frameId) - .build()); - RunningFrameStatusResponse frameStatusResponse = - getRunningFrameStub(proc.hostName) - .status(RunningFrameStatusRequest.newBuilder() - .setRunFrame(getRunFrameResponse.getRunFrame()) - .build()); - return frameStatusResponse.getRunningFrameInfo(); - } catch(StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to obtain status for frame " + proc.frameId, e); - } + } + + public RunningFrameInfo getFrameStatus(VirtualProc proc) { + try { + RqdStaticGetRunFrameResponse getRunFrameResponse = getStub(proc.hostName) + .getRunFrame(RqdStaticGetRunFrameRequest.newBuilder().setFrameId(proc.frameId).build()); + RunningFrameStatusResponse frameStatusResponse = + getRunningFrameStub(proc.hostName).status(RunningFrameStatusRequest.newBuilder() + .setRunFrame(getRunFrameResponse.getRunFrame()).build()); + return frameStatusResponse.getRunningFrameInfo(); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to obtain status for frame " + proc.frameId, e); } + } - public void launchFrame(final RunFrame frame, final VirtualProc proc) { - RqdStaticLaunchFrameRequest request = - RqdStaticLaunchFrameRequest.newBuilder().setRunFrame(frame).build(); + public void launchFrame(final RunFrame frame, final VirtualProc proc) { + RqdStaticLaunchFrameRequest request = + RqdStaticLaunchFrameRequest.newBuilder().setRunFrame(frame).build(); - if (testMode) { - return; - } - - try { - getStub(proc.hostName).launchFrame(request); - } catch (StatusRuntimeException | 
ExecutionException e) { - throw new RqdClientException("failed to launch frame", e); - } + if (testMode) { + return; } - @Override - public void setTestMode(boolean testMode) { - this.testMode = testMode; + try { + getStub(proc.hostName).launchFrame(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to launch frame", e); } -} + } + @Override + public void setTestMode(boolean testMode) { + this.testMode = testMode; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java b/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java index b5a6633a9..68d773301 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.servant; import io.grpc.stub.StreamObserver; @@ -33,128 +29,121 @@ public class CueStatic extends CueInterfaceGrpc.CueInterfaceImplBase { - private Whiteboard whiteboard; - private DispatchQueue manageQueue; - private DispatchQueue dispatchQueue; - private HostReportQueue reportQueue; - private BookingQueue bookingQueue; - private DispatchSupport dispatchSupport; - - @Override - public void getSystemStats(CueGetSystemStatsRequest request, - StreamObserver responseObserver) { - SystemStats stats = SystemStats.newBuilder() - .setDispatchThreads(dispatchQueue.getActiveCount()) - .setDispatchWaiting(dispatchQueue.getSize()) - .setDispatchRemainingCapacity(dispatchQueue.getRemainingCapacity()) - .setDispatchExecuted(dispatchQueue.getCompletedTaskCount()) - .setDispatchRejected(dispatchQueue.getRejectedTaskCount()) - - .setManageThreads(manageQueue.getActiveCount()) - .setManageWaiting(manageQueue.getSize()) - .setManageRemainingCapacity(manageQueue.getRemainingCapacity()) - .setManageExecuted(manageQueue.getCompletedTaskCount()) - .setManageRejected(manageQueue.getRejectedTaskCount()) - - .setReportThreads(reportQueue.getActiveCount()) - .setReportWaiting(reportQueue.getQueue().size()) - .setReportRemainingCapacity(reportQueue.getQueue().remainingCapacity()) - .setReportExecuted(reportQueue.getTaskCount()) - .setReportRejected(reportQueue.getRejectedTaskCount()) - - .setBookingWaiting(bookingQueue.getSize()) - .setBookingRemainingCapacity(bookingQueue.getRemainingCapacity()) - .setBookingThreads(bookingQueue.getActiveCount()) - .setBookingExecuted(bookingQueue.getCompletedTaskCount()) - .setBookingRejected(bookingQueue.getRejectedTaskCount()) - .setBookingSleepMillis(0) - - .setHostBalanceSuccess(DispatchSupport.balanceSuccess.get()) - .setHostBalanceFailed(DispatchSupport.balanceFailed.get()) - .setKilledOffenderProcs(DispatchSupport.killedOffenderProcs.get()) - .setKilledOomProcs(DispatchSupport.killedOomProcs.get()) - .setClearedProcs(DispatchSupport.clearedProcs.get()) - .setBookingRetries(DispatchSupport.bookingRetries.get()) - .setBookingErrors(DispatchSupport.bookingErrors.get()) - .setBookedProcs(DispatchSupport.bookedProcs.get()) - - // TODO(gregdenton) Reimplement these with gRPC. 
(Issue #69) - // .setReqForData(IceServer.dataRequests.get()) - // .setReqForFunction(IceServer.rpcRequests.get()) - // .setReqErrors(IceServer.errors.get()) - - .setUnbookedProcs(DispatchSupport.unbookedProcs.get()) - .setPickedUpCores(DispatchSupport.pickedUpCoresCount.get()) - .setStrandedCores(DispatchSupport.strandedCoresCount.get()) - .build(); - responseObserver.onNext(CueGetSystemStatsResponse.newBuilder() - .setStats(stats) - .build()); - responseObserver.onCompleted(); - } - - public boolean isDispatchQueueHealthy() { - return this.dispatchQueue.isHealthy(); - } - - public boolean isManageQueueHealthy() { - return this.manageQueue.isHealthy(); - } - - public boolean isReportQueueHealthy() { - return this.reportQueue.isHealthy(); - } - - public boolean isBookingQueueHealthy() { - return this.bookingQueue.isHealthy(); - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public DispatchQueue getDispatchQueue() { - return dispatchQueue; - } - - public void setDispatchQueue(DispatchQueue dispatchQueue) { - this.dispatchQueue = dispatchQueue; - } - - public HostReportQueue getReportQueue() { - return reportQueue; - } - - public void setReportQueue(HostReportQueue reportQueue) { - this.reportQueue = reportQueue; - } - - public BookingQueue getBookingQueue() { - return bookingQueue; - } - - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } - - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } - - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + private Whiteboard whiteboard; + private DispatchQueue manageQueue; + private DispatchQueue dispatchQueue; + private HostReportQueue reportQueue; + private BookingQueue bookingQueue; + private DispatchSupport dispatchSupport; + + @Override + public void getSystemStats(CueGetSystemStatsRequest request, + StreamObserver responseObserver) { + SystemStats stats = SystemStats.newBuilder().setDispatchThreads(dispatchQueue.getActiveCount()) + .setDispatchWaiting(dispatchQueue.getSize()) + .setDispatchRemainingCapacity(dispatchQueue.getRemainingCapacity()) + .setDispatchExecuted(dispatchQueue.getCompletedTaskCount()) + .setDispatchRejected(dispatchQueue.getRejectedTaskCount()) + + .setManageThreads(manageQueue.getActiveCount()).setManageWaiting(manageQueue.getSize()) + .setManageRemainingCapacity(manageQueue.getRemainingCapacity()) + .setManageExecuted(manageQueue.getCompletedTaskCount()) + .setManageRejected(manageQueue.getRejectedTaskCount()) + + .setReportThreads(reportQueue.getActiveCount()) + .setReportWaiting(reportQueue.getQueue().size()) + .setReportRemainingCapacity(reportQueue.getQueue().remainingCapacity()) + .setReportExecuted(reportQueue.getTaskCount()) + .setReportRejected(reportQueue.getRejectedTaskCount()) + + .setBookingWaiting(bookingQueue.getSize()) + .setBookingRemainingCapacity(bookingQueue.getRemainingCapacity()) + .setBookingThreads(bookingQueue.getActiveCount()) + .setBookingExecuted(bookingQueue.getCompletedTaskCount()) + .setBookingRejected(bookingQueue.getRejectedTaskCount()).setBookingSleepMillis(0) + + .setHostBalanceSuccess(DispatchSupport.balanceSuccess.get()) + 
.setHostBalanceFailed(DispatchSupport.balanceFailed.get()) + .setKilledOffenderProcs(DispatchSupport.killedOffenderProcs.get()) + .setKilledOomProcs(DispatchSupport.killedOomProcs.get()) + .setClearedProcs(DispatchSupport.clearedProcs.get()) + .setBookingRetries(DispatchSupport.bookingRetries.get()) + .setBookingErrors(DispatchSupport.bookingErrors.get()) + .setBookedProcs(DispatchSupport.bookedProcs.get()) + + // TODO(gregdenton) Reimplement these with gRPC. (Issue #69) + // .setReqForData(IceServer.dataRequests.get()) + // .setReqForFunction(IceServer.rpcRequests.get()) + // .setReqErrors(IceServer.errors.get()) + + .setUnbookedProcs(DispatchSupport.unbookedProcs.get()) + .setPickedUpCores(DispatchSupport.pickedUpCoresCount.get()) + .setStrandedCores(DispatchSupport.strandedCoresCount.get()).build(); + responseObserver.onNext(CueGetSystemStatsResponse.newBuilder().setStats(stats).build()); + responseObserver.onCompleted(); + } + + public boolean isDispatchQueueHealthy() { + return this.dispatchQueue.isHealthy(); + } + + public boolean isManageQueueHealthy() { + return this.manageQueue.isHealthy(); + } + + public boolean isReportQueueHealthy() { + return this.reportQueue.isHealthy(); + } + + public boolean isBookingQueueHealthy() { + return this.bookingQueue.isHealthy(); + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public DispatchQueue getDispatchQueue() { + return dispatchQueue; + } + + public void setDispatchQueue(DispatchQueue dispatchQueue) { + this.dispatchQueue = dispatchQueue; + } + + public HostReportQueue getReportQueue() { + return reportQueue; + } + + public void setReportQueue(HostReportQueue reportQueue) { + this.reportQueue = reportQueue; + } + + public BookingQueue getBookingQueue() { + return bookingQueue; + } + + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java index 321f56395..caca2612d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.servant; import com.imageworks.spcue.SpcueRuntimeException; @@ -38,58 +35,61 @@ public class ManageAction extends ActionInterfaceGrpc.ActionInterfaceImplBase { - private FilterManager filterManager; - private Whiteboard whiteboard; + private FilterManager filterManager; + private Whiteboard whiteboard; - @Override - public void delete(ActionDeleteRequest request, StreamObserver responseObserver) { - Action requestAction = request.getAction(); - ActionEntity existingAction = filterManager.getAction(requestAction.getId()); - FilterEntity filterEntity = filterManager.getFilter(existingAction); - ActionEntity actionToDelete = ActionEntity.build(filterEntity, requestAction, requestAction.getId()); - filterManager.deleteAction(actionToDelete); - responseObserver.onNext(ActionDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(ActionDeleteRequest request, + StreamObserver responseObserver) { + Action requestAction = request.getAction(); + ActionEntity existingAction = filterManager.getAction(requestAction.getId()); + FilterEntity filterEntity = filterManager.getFilter(existingAction); + ActionEntity actionToDelete = + ActionEntity.build(filterEntity, requestAction, requestAction.getId()); + filterManager.deleteAction(actionToDelete); + responseObserver.onNext(ActionDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void getParentFilter(ActionGetParentFilterRequest request, - StreamObserver responseObserver) { - Filter filter = whiteboard.getFilter(ActionEntity.build(request.getAction())); - responseObserver.onNext(ActionGetParentFilterResponse.newBuilder().setFilter(filter).build()); - responseObserver.onCompleted(); - } + @Override + public void getParentFilter(ActionGetParentFilterRequest request, + StreamObserver responseObserver) { + Filter filter = whiteboard.getFilter(ActionEntity.build(request.getAction())); + responseObserver.onNext(ActionGetParentFilterResponse.newBuilder().setFilter(filter).build()); + responseObserver.onCompleted(); + } - @Override - public void commit(ActionCommitRequest request, StreamObserver responseObserver) { - Action requestAction = request.getAction(); - // Getting an action to have filterId populated from the DB - try { - ActionEntity persistedAction = filterManager.getAction(requestAction.getId()); - ActionEntity newAction = ActionEntity.build(persistedAction, requestAction, requestAction.getId()); - filterManager.updateAction(newAction); - responseObserver.onNext(ActionCommitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - throw new SpcueRuntimeException("Invalid actionId on Action commit: " + - requestAction.getId()); - } + @Override + public void commit(ActionCommitRequest request, + StreamObserver responseObserver) { + Action requestAction = request.getAction(); + // Getting an action to have filterId populated from the DB + try { + ActionEntity persistedAction = filterManager.getAction(requestAction.getId()); + ActionEntity newAction = + ActionEntity.build(persistedAction, requestAction, requestAction.getId()); + 
filterManager.updateAction(newAction); + responseObserver.onNext(ActionCommitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + throw new SpcueRuntimeException( + "Invalid actionId on Action commit: " + requestAction.getId()); } + } - public FilterManager getFilterManager() { - return filterManager; - } + public FilterManager getFilterManager() { + return filterManager; + } - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } - public Whiteboard getWhiteboard() { - return whiteboard; - } + public Whiteboard getWhiteboard() { + return whiteboard; + } - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java index 46edc2025..71c0af17b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java @@ -52,251 +52,225 @@ import com.imageworks.spcue.util.CueUtil; public class ManageAllocation extends AllocationInterfaceGrpc.AllocationInterfaceImplBase { - private AllocationDao allocationDao; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - private AdminManager adminManager; - private HostManager hostManager; - private HostSearchFactory hostSearchFactory; - - public ManageAllocation() {} - - @Override - public void create( - AllocCreateRequest request, StreamObserver responseObserver) { - String new_name = request.getName(); - // If they pass name in the format ., just remove the facility. - if (CueUtil.verifyAllocationNameFormat(request.getName())) { - new_name = CueUtil.splitAllocationName(request.getName())[1]; - } - - AllocationEntity detail = new AllocationEntity(); - detail.name = new_name; - detail.tag = request.getTag(); - adminManager.createAllocation( - adminManager.getFacility(request.getFacility().getName()), detail); - - responseObserver.onNext( - AllocCreateResponse.newBuilder() - .setAllocation(whiteboard.getAllocation(detail.id)) - .build()); - responseObserver.onCompleted(); + private AllocationDao allocationDao; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + private AdminManager adminManager; + private HostManager hostManager; + private HostSearchFactory hostSearchFactory; + + public ManageAllocation() {} + + @Override + public void create(AllocCreateRequest request, + StreamObserver responseObserver) { + String new_name = request.getName(); + // If they pass name in the format ., just remove the facility. 
+ if (CueUtil.verifyAllocationNameFormat(request.getName())) { + new_name = CueUtil.splitAllocationName(request.getName())[1]; } - @Override - public void getAll( - AllocGetAllRequest request, StreamObserver responseObserver) { - responseObserver.onNext( - AllocGetAllResponse.newBuilder() - .setAllocations(whiteboard.getAllocations()) - .build()); - responseObserver.onCompleted(); + AllocationEntity detail = new AllocationEntity(); + detail.name = new_name; + detail.tag = request.getTag(); + adminManager.createAllocation(adminManager.getFacility(request.getFacility().getName()), + detail); + + responseObserver.onNext(AllocCreateResponse.newBuilder() + .setAllocation(whiteboard.getAllocation(detail.id)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getAll(AllocGetAllRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + AllocGetAllResponse.newBuilder().setAllocations(whiteboard.getAllocations()).build()); + responseObserver.onCompleted(); + } + + @Override + public void find(AllocFindRequest request, StreamObserver responseObserver) { + try { + responseObserver.onNext(AllocFindResponse.newBuilder() + .setAllocation(whiteboard.findAllocation(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } - - @Override - public void find( - AllocFindRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext( - AllocFindResponse.newBuilder() - .setAllocation(whiteboard.findAllocation(request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void get(AllocGetRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext( - AllocGetResponse.newBuilder() - .setAllocation(whiteboard.findAllocation(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - private AllocationEntity findAllocationDetail(String facility, String name) { - // If they pass name in the format ., just remove the facility. 
- if (CueUtil.verifyAllocationNameFormat(name)) { - name = CueUtil.splitAllocationName(name)[1]; - } - return adminManager.findAllocationDetail(facility, name); - } - - @Override - public void delete( - AllocDeleteRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail( - request.getAllocation().getFacility(), request.getAllocation().getName()); - adminManager.deleteAllocation(alloc); - responseObserver.onNext(AllocDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void findHosts( - AllocFindHostsRequest request, - StreamObserver responseObserver) { - HostSearchCriteria searchCriteria = request.getR().toBuilder() - .addAllocs(request.getAllocation().getId()) - .build(); - responseObserver.onNext(AllocFindHostsResponse.newBuilder() - .setHosts(whiteboard.getHosts(hostSearchFactory.create(searchCriteria))) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getHosts( - AllocGetHostsRequest request, StreamObserver responseObserver) { - responseObserver.onNext(AllocGetHostsResponse.newBuilder() - .setHosts( - whiteboard.getHosts( - hostSearchFactory.create( - toAllocationEntity(request.getAllocation())))) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getSubscriptions( - AllocGetSubscriptionsRequest request, - StreamObserver responseObserver) { - AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); - responseObserver.onNext(AllocGetSubscriptionsResponse.newBuilder() - .setSubscriptions(whiteboard.getSubscriptions(allocEntity)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void reparentHosts( - AllocReparentHostsRequest request, - StreamObserver responseObserver) { - AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); - List hosts = request.getHosts().getHostsList(); - List hostEntities = hosts.stream() - .map(HostEntity::new) - .collect(Collectors.toList()); - manageQueue.execute(new ManageReparentHosts(allocEntity, hostEntities, hostManager)); - responseObserver.onNext(AllocReparentHostsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setBillable( - AllocSetBillableRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail( - request.getAllocation().getFacility(), request.getAllocation().getName()); - adminManager.setAllocationBillable(alloc, request.getValue()); - responseObserver.onNext(AllocSetBillableResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setName( - AllocSetNameRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail( - request.getAllocation().getFacility(), request.getAllocation().getName()); - adminManager.setAllocationName(alloc, request.getName()); - responseObserver.onNext(AllocSetNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setTag( - AllocSetTagRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail( - request.getAllocation().getFacility(), request.getAllocation().getName()); - adminManager.setAllocationTag(alloc, request.getTag()); - responseObserver.onNext(AllocSetTagResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getDefault( - AllocGetDefaultRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc 
= adminManager.getDefaultAllocation(); - responseObserver.onNext(AllocGetDefaultResponse.newBuilder() - .setAllocation(whiteboard.getAllocation(alloc.id)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefault( - AllocSetDefaultRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail( - request.getAllocation().getFacility(), request.getAllocation().getName()); - adminManager.setDefaultAllocation(alloc); - responseObserver.onNext(AllocSetDefaultResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public AllocationDao getAllocationDao() { - return allocationDao; - } - - public void setAllocationDao(AllocationDao allocationDao) { - this.allocationDao = allocationDao; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { - this.hostSearchFactory = hostSearchFactory; + } + + @Override + public void get(AllocGetRequest request, StreamObserver responseObserver) { + try { + responseObserver.onNext(AllocGetResponse.newBuilder() + .setAllocation(whiteboard.findAllocation(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } + } - private AllocationEntity toAllocationEntity(Allocation allocGrpc) { - AllocationEntity allocEntity = new AllocationEntity(); - allocEntity.id = allocGrpc.getId(); - allocEntity.name = allocGrpc.getName(); - allocEntity.tag = allocGrpc.getTag(); - allocEntity.facilityId = allocGrpc.getFacility(); - return allocEntity; + private AllocationEntity findAllocationDetail(String facility, String name) { + // If they pass name in the format ., just remove the facility. 
+ if (CueUtil.verifyAllocationNameFormat(name)) { + name = CueUtil.splitAllocationName(name)[1]; } + return adminManager.findAllocationDetail(facility, name); + } + + @Override + public void delete(AllocDeleteRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.deleteAllocation(alloc); + responseObserver.onNext(AllocDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void findHosts(AllocFindHostsRequest request, + StreamObserver responseObserver) { + HostSearchCriteria searchCriteria = + request.getR().toBuilder().addAllocs(request.getAllocation().getId()).build(); + responseObserver.onNext(AllocFindHostsResponse.newBuilder() + .setHosts(whiteboard.getHosts(hostSearchFactory.create(searchCriteria))).build()); + responseObserver.onCompleted(); + } + + @Override + public void getHosts(AllocGetHostsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(AllocGetHostsResponse.newBuilder() + .setHosts(whiteboard + .getHosts(hostSearchFactory.create(toAllocationEntity(request.getAllocation())))) + .build()); + responseObserver.onCompleted(); + } + + @Override + public void getSubscriptions(AllocGetSubscriptionsRequest request, + StreamObserver responseObserver) { + AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); + responseObserver.onNext(AllocGetSubscriptionsResponse.newBuilder() + .setSubscriptions(whiteboard.getSubscriptions(allocEntity)).build()); + responseObserver.onCompleted(); + } + + @Override + public void reparentHosts(AllocReparentHostsRequest request, + StreamObserver responseObserver) { + AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); + List hosts = request.getHosts().getHostsList(); + List hostEntities = + hosts.stream().map(HostEntity::new).collect(Collectors.toList()); + manageQueue.execute(new ManageReparentHosts(allocEntity, hostEntities, hostManager)); + responseObserver.onNext(AllocReparentHostsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setBillable(AllocSetBillableRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setAllocationBillable(alloc, request.getValue()); + responseObserver.onNext(AllocSetBillableResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setName(AllocSetNameRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setAllocationName(alloc, request.getName()); + responseObserver.onNext(AllocSetNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setTag(AllocSetTagRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setAllocationTag(alloc, request.getTag()); + responseObserver.onNext(AllocSetTagResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getDefault(AllocGetDefaultRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = adminManager.getDefaultAllocation(); + 
responseObserver.onNext(AllocGetDefaultResponse.newBuilder()
+        .setAllocation(whiteboard.getAllocation(alloc.id)).build());
+    responseObserver.onCompleted();
+  }
+
+  @Override
+  public void setDefault(AllocSetDefaultRequest request,
+      StreamObserver<AllocSetDefaultResponse> responseObserver) {
+    AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(),
+        request.getAllocation().getName());
+    adminManager.setDefaultAllocation(alloc);
+    responseObserver.onNext(AllocSetDefaultResponse.newBuilder().build());
+    responseObserver.onCompleted();
+  }
+
+  public AdminManager getAdminManager() {
+    return adminManager;
+  }
+
+  public void setAdminManager(AdminManager adminManager) {
+    this.adminManager = adminManager;
+  }
+
+  public AllocationDao getAllocationDao() {
+    return allocationDao;
+  }
+
+  public void setAllocationDao(AllocationDao allocationDao) {
+    this.allocationDao = allocationDao;
+  }
+
+  public DispatchQueue getManageQueue() {
+    return manageQueue;
+  }
+
+  public void setManageQueue(DispatchQueue manageQueue) {
+    this.manageQueue = manageQueue;
+  }
+
+  public Whiteboard getWhiteboard() {
+    return whiteboard;
+  }
+
+  public void setWhiteboard(Whiteboard whiteboard) {
+    this.whiteboard = whiteboard;
+  }
+
+  public HostManager getHostManager() {
+    return hostManager;
+  }
+
+  public void setHostManager(HostManager hostManager) {
+    this.hostManager = hostManager;
+  }
+
+  public void setHostSearchFactory(HostSearchFactory hostSearchFactory) {
+    this.hostSearchFactory = hostSearchFactory;
+  }
+
+  private AllocationEntity toAllocationEntity(Allocation allocGrpc) {
+    AllocationEntity allocEntity = new AllocationEntity();
+    allocEntity.id = allocGrpc.getId();
+    allocEntity.name = allocGrpc.getName();
+    allocEntity.tag = allocGrpc.getTag();
+    allocEntity.facilityId = allocGrpc.getFacility();
+    return allocEntity;
+  }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java
index 82ba1e173..839fde67c 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java
@@ -2,21 +2,17 @@
 /*
  * Copyright Contributors to the OpenCue Project
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
 */
-
-
 package com.imageworks.spcue.servant;
 import io.grpc.stub.StreamObserver;
@@ -31,33 +27,34 @@ public class ManageComment extends CommentInterfaceGrpc.CommentInterfaceImplBase {
-    private CommentManager commentManager;
-
-    @Override
-    public void delete(CommentDeleteRequest request, StreamObserver<CommentDeleteResponse> responseObserver) {
-        commentManager.deleteComment(request.getComment().getId());
-        responseObserver.onNext(CommentDeleteResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
-
-    @Override
-    public void save(CommentSaveRequest request, StreamObserver<CommentSaveResponse> responseObserver) {
-        CommentDetail c = new CommentDetail();
-        c.id = request.getComment().getId();
-        c.message = request.getComment().getMessage();
-        c.subject = request.getComment().getSubject();
-        commentManager.saveComment(c);
-        CommentSaveResponse response = CommentSaveResponse.newBuilder().build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
-
-    public CommentManager getCommentManager() {
-        return commentManager;
-    }
-
-    public void setCommentManager(CommentManager commentManager) {
-        this.commentManager = commentManager;
-    }
+  private CommentManager commentManager;
+
+  @Override
+  public void delete(CommentDeleteRequest request,
+      StreamObserver<CommentDeleteResponse> responseObserver) {
+    commentManager.deleteComment(request.getComment().getId());
+    responseObserver.onNext(CommentDeleteResponse.newBuilder().build());
+    responseObserver.onCompleted();
+  }
+
+  @Override
+  public void save(CommentSaveRequest request,
+      StreamObserver<CommentSaveResponse> responseObserver) {
+    CommentDetail c = new CommentDetail();
+    c.id = request.getComment().getId();
+    c.message = request.getComment().getMessage();
+    c.subject = request.getComment().getSubject();
+    commentManager.saveComment(c);
+    CommentSaveResponse response = CommentSaveResponse.newBuilder().build();
+    responseObserver.onNext(response);
+    responseObserver.onCompleted();
+  }
+
+  public CommentManager getCommentManager() {
+    return commentManager;
+  }
+
+  public void setCommentManager(CommentManager commentManager) {
+    this.commentManager = commentManager;
+  }
 }
-
diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java
index 057be1843..258a7a7b7 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java
@@ -2,21 +2,17 @@
 /*
  * Copyright Contributors to the OpenCue Project
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
 */
-
-
 package com.imageworks.spcue.servant;
 import io.grpc.stub.StreamObserver;
@@ -37,53 +33,55 @@ public class ManageDeed extends DeedInterfaceGrpc.DeedInterfaceImplBase {
-    private OwnerManager ownerManager;
-    private Whiteboard whiteboard;
-
-    @Override
-    public void delete(DeedDeleteRequest request, StreamObserver<DeedDeleteResponse> responseObserver) {
-        ownerManager.removeDeed(toEntity(request.getDeed()));
-        responseObserver.onNext(DeedDeleteResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
-
-    @Override
-    public void getHost(DeedGetHostRequest request, StreamObserver<DeedGetHostResponse> responseObserver) {
-        Host host = whiteboard.getHost(toEntity(request.getDeed()));
-        responseObserver.onNext(DeedGetHostResponse.newBuilder().setHost(host).build());
-        responseObserver.onCompleted();
-    }
-
-    @Override
-    public void getOwner(DeedGetOwnerRequest request, StreamObserver<DeedGetOwnerResponse> responseObserver) {
-        Owner owner = whiteboard.getOwner(toEntity(request.getDeed()));
-        responseObserver.onNext(DeedGetOwnerResponse.newBuilder().setOwner(owner).build());
-        responseObserver.onCompleted();
-    }
-
-    public OwnerManager getOwnerManager() {
-        return ownerManager;
-    }
-
-    public void setOwnerManager(OwnerManager ownerManager) {
-        this.ownerManager = ownerManager;
-    }
-
-    public Whiteboard getWhiteboard() {
-        return whiteboard;
-    }
-
-    public void setWhiteboard(Whiteboard whiteboard) {
-        this.whiteboard = whiteboard;
-    }
-
-    private DeedEntity toEntity(Deed deed) {
-        DeedEntity entity = new DeedEntity();
-        entity.id = deed.getId();
-        entity.host = deed.getHost();
-        entity.owner = deed.getOwner();
-        entity.show = deed.getShow();
-        return entity;
-    }
+  private OwnerManager ownerManager;
+  private Whiteboard whiteboard;
+
+  @Override
+  public void delete(DeedDeleteRequest request,
+      StreamObserver<DeedDeleteResponse> responseObserver) {
+    ownerManager.removeDeed(toEntity(request.getDeed()));
+    responseObserver.onNext(DeedDeleteResponse.newBuilder().build());
+    responseObserver.onCompleted();
+  }
+
+  @Override
+  public void getHost(DeedGetHostRequest request,
+      StreamObserver<DeedGetHostResponse> responseObserver) {
+    Host host = whiteboard.getHost(toEntity(request.getDeed()));
+    responseObserver.onNext(DeedGetHostResponse.newBuilder().setHost(host).build());
+    responseObserver.onCompleted();
+  }
+
+  @Override
+  public void getOwner(DeedGetOwnerRequest request,
+      StreamObserver<DeedGetOwnerResponse> responseObserver) {
+    Owner owner = whiteboard.getOwner(toEntity(request.getDeed()));
+    responseObserver.onNext(DeedGetOwnerResponse.newBuilder().setOwner(owner).build());
+    responseObserver.onCompleted();
+  }
+
+  public OwnerManager getOwnerManager() {
+    return ownerManager;
+  }
+
+  public void setOwnerManager(OwnerManager ownerManager) {
+    this.ownerManager = ownerManager;
+  }
+
+  public Whiteboard getWhiteboard() {
+    return whiteboard;
+  }
+
+  public void setWhiteboard(Whiteboard whiteboard) {
+    this.whiteboard = whiteboard;
+  }
+
+  private DeedEntity toEntity(Deed deed) {
+    DeedEntity entity = new DeedEntity();
+    entity.id = deed.getId();
+    entity.host = deed.getHost();
+    entity.owner = deed.getOwner();
+    entity.show = deed.getShow();
+    return entity;
+  }
 }
-
diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java
index 0fa2d3e01..610639019 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java
+++
b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import java.util.List; @@ -60,159 +56,165 @@ public class ManageDepartment extends DepartmentInterfaceGrpc.DepartmentInterfaceImplBase { - private AdminManager adminManager; - private DepartmentManager departmentManager; - private Whiteboard whiteboard; - - private TaskSeq.Builder addTasksToDepartment(Map tmap, PointDetail deptConfig) { - TaskSeq.Builder builder = TaskSeq.newBuilder(); - for (Map.Entry e: tmap.entrySet()) { - TaskEntity t = new TaskEntity(deptConfig, e.getKey(), e.getValue()); - departmentManager.createTask(t); - builder.addTasks(toTask(t)); - } - return builder; - } - - @Override - public void addDepartmentName(DeptAddDeptNameRequest request, StreamObserver responseObserver) { - adminManager.createDepartment(request.getName()); - responseObserver.onNext(DeptAddDeptNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addTask(DeptAddTaskRequest request, StreamObserver responseObserver) { - TaskEntity t = new TaskEntity( - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()), - request.getShot(), Convert.coresToCoreUnits(request.getMinCores())); - departmentManager.createTask(t); - Task createdTask = toTask(t); - responseObserver.onNext(DeptAddTaskResponse.newBuilder().setTask(createdTask).build()); - responseObserver.onCompleted(); - } - - @Override - public void addTasks(DeptAddTasksRequest request, StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); - responseObserver.onNext(DeptAddTasksResponse.newBuilder().setTasks(builder.build()).build()); - responseObserver.onCompleted(); - } - - @Override - public void clearTasks(DeptClearTasksRequest request, StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.clearTasks(deptConfig); - responseObserver.onNext(DeptClearTasksResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void clearTaskAdjustments(DeptClearTaskAdjustmentsRequest request, - StreamObserver responseObserver) { 
- PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.clearTaskAdjustments(deptConfig); - responseObserver.onNext(DeptClearTaskAdjustmentsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void disableTiManaged(DeptDisableTiManagedRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.disableTiManaged(deptConfig); - responseObserver.onNext(DeptDisableTiManagedResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void enableTiManaged(DeptEnableTiManagedRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.enableTiManaged(deptConfig, request.getTiTask(), - Convert.coresToWholeCoreUnits(request.getManagedCores())); - responseObserver.onNext(DeptEnableTiManagedResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getDepartmentNames(DeptGetDepartmentNamesRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(DeptGetDepartmentNamesResponse.newBuilder() - .addAllNames(whiteboard.getDepartmentNames()) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getTasks(DeptGetTasksRequest request, StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - TaskSeq tasks = whiteboard.getTasks(deptConfig, deptConfig); - TaskSeq taskSeq = TaskSeq.newBuilder().addAllTasks(tasks.getTasksList()).build(); - responseObserver.onNext(DeptGetTasksResponse.newBuilder().setTasks(taskSeq).build()); - responseObserver.onCompleted(); - } - - @Override - public void removeDepartmentName(DeptRemoveDepartmentNameRequest request, - StreamObserver responseObserver) { - adminManager.removeDepartment(adminManager.findDepartment(request.getName())); - responseObserver.onNext(DeptRemoveDepartmentNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void replaceTasks(DeptReplaceTaskRequest request, StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.clearTasks(deptConfig); - TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); - responseObserver.onNext(DeptReplaceTaskResponse.newBuilder().setTasks(builder.build()).build()); - responseObserver.onCompleted(); - } - - public void setManagedCores(DeptSetManagedCoresRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.setManagedCores(deptConfig, - Convert.coresToWholeCoreUnits(request.getManagedCores())); - responseObserver.onNext(DeptSetManagedCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public Whiteboard 
getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private Task toTask(TaskEntity detail) { - return Task.newBuilder() - .setId(detail.id) - .setName(detail.name) - .setShot(detail.shot) - .setDept(detail.deptId) - .setMinCores(detail.minCoreUnits) - .setPointId(detail.pointId) - .build(); - } + private AdminManager adminManager; + private DepartmentManager departmentManager; + private Whiteboard whiteboard; + + private TaskSeq.Builder addTasksToDepartment(Map tmap, PointDetail deptConfig) { + TaskSeq.Builder builder = TaskSeq.newBuilder(); + for (Map.Entry e : tmap.entrySet()) { + TaskEntity t = new TaskEntity(deptConfig, e.getKey(), e.getValue()); + departmentManager.createTask(t); + builder.addTasks(toTask(t)); + } + return builder; + } + + @Override + public void addDepartmentName(DeptAddDeptNameRequest request, + StreamObserver responseObserver) { + adminManager.createDepartment(request.getName()); + responseObserver.onNext(DeptAddDeptNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addTask(DeptAddTaskRequest request, + StreamObserver responseObserver) { + TaskEntity t = + new TaskEntity(departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()), + request.getShot(), Convert.coresToCoreUnits(request.getMinCores())); + departmentManager.createTask(t); + Task createdTask = toTask(t); + responseObserver.onNext(DeptAddTaskResponse.newBuilder().setTask(createdTask).build()); + responseObserver.onCompleted(); + } + + @Override + public void addTasks(DeptAddTasksRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); + responseObserver.onNext(DeptAddTasksResponse.newBuilder().setTasks(builder.build()).build()); + responseObserver.onCompleted(); + } + + @Override + public void clearTasks(DeptClearTasksRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.clearTasks(deptConfig); + responseObserver.onNext(DeptClearTasksResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void clearTaskAdjustments(DeptClearTaskAdjustmentsRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.clearTaskAdjustments(deptConfig); + responseObserver.onNext(DeptClearTaskAdjustmentsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void disableTiManaged(DeptDisableTiManagedRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.disableTiManaged(deptConfig); + responseObserver.onNext(DeptDisableTiManagedResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void enableTiManaged(DeptEnableTiManagedRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.enableTiManaged(deptConfig, request.getTiTask(), + Convert.coresToWholeCoreUnits(request.getManagedCores())); + 
responseObserver.onNext(DeptEnableTiManagedResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getDepartmentNames(DeptGetDepartmentNamesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(DeptGetDepartmentNamesResponse.newBuilder() + .addAllNames(whiteboard.getDepartmentNames()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getTasks(DeptGetTasksRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + TaskSeq tasks = whiteboard.getTasks(deptConfig, deptConfig); + TaskSeq taskSeq = TaskSeq.newBuilder().addAllTasks(tasks.getTasksList()).build(); + responseObserver.onNext(DeptGetTasksResponse.newBuilder().setTasks(taskSeq).build()); + responseObserver.onCompleted(); + } + + @Override + public void removeDepartmentName(DeptRemoveDepartmentNameRequest request, + StreamObserver responseObserver) { + adminManager.removeDepartment(adminManager.findDepartment(request.getName())); + responseObserver.onNext(DeptRemoveDepartmentNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void replaceTasks(DeptReplaceTaskRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.clearTasks(deptConfig); + TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); + responseObserver.onNext(DeptReplaceTaskResponse.newBuilder().setTasks(builder.build()).build()); + responseObserver.onCompleted(); + } + + public void setManagedCores(DeptSetManagedCoresRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.setManagedCores(deptConfig, + Convert.coresToWholeCoreUnits(request.getManagedCores())); + responseObserver.onNext(DeptSetManagedCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private Task toTask(TaskEntity detail) { + return Task.newBuilder().setId(detail.id).setName(detail.name).setShot(detail.shot) + .setDept(detail.deptId).setMinCores(detail.minCoreUnits).setPointId(detail.pointId).build(); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java index 9fad37ef1..a86c8a049 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import io.grpc.Status; @@ -40,75 +36,73 @@ public class ManageDepend extends DependInterfaceGrpc.DependInterfaceImplBase { - private static final Logger logger = LogManager.getLogger(ManageDepend.class); + private static final Logger logger = LogManager.getLogger(ManageDepend.class); + + private DependManager dependManager; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + + @Override + public void getDepend(DependGetDependRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(DependGetDependResponse.newBuilder() + .setDepend(whiteboard.getDepend(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } - private DependManager dependManager; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; + public void satisfy(DependSatisfyRequest request, + StreamObserver responseObserver) { - @Override - public void getDepend(DependGetDependRequest request, StreamObserver responseObserver) { + LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); + String key = "manage_dep_sat_req_" + request.getDepend().getId(); + manageQueue.execute(new KeyRunnable(key) { + public void run() { try { - responseObserver.onNext(DependGetDependResponse.newBuilder() - .setDepend(whiteboard.getDepend(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); + logger.info("dropping dependency: " + depend.id); + dependManager.satisfyDepend(depend); + } catch (Exception e) { + logger.error("error satisfying dependency: " + depend.getId() + " , " + e); } - } - - public void satisfy(DependSatisfyRequest request, StreamObserver responseObserver) { - - LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); - String key = "manage_dep_sat_req_" + request.getDepend().getId(); - manageQueue.execute(new KeyRunnable(key) { - public void run() { - try { - logger.info("dropping dependency: " + depend.id); - dependManager.satisfyDepend(depend); - } catch (Exception e) { - logger.error("error satisfying dependency: " - + depend.getId() + " , " + e); - } - } - }); - responseObserver.onNext(DependSatisfyResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - 
- public void unsatisfy(DependUnsatisfyRequest request, StreamObserver responseObserver) { - LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); - dependManager.unsatisfyDepend(depend); - responseObserver.onNext(DependUnsatisfyResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + } + }); + responseObserver.onNext(DependSatisfyResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void unsatisfy(DependUnsatisfyRequest request, + StreamObserver responseObserver) { + LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); + dependManager.unsatisfyDepend(depend); + responseObserver.onNext(DependUnsatisfyResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java index ada170aee..0fe0f6606 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java @@ -17,68 +17,67 @@ import com.imageworks.spcue.service.Whiteboard; public class ManageFacility extends FacilityInterfaceGrpc.FacilityInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; + private AdminManager adminManager; + private Whiteboard whiteboard; - public ManageFacility() {} + public ManageFacility() {} - // TODO(bcipriano) Add error handling. (Issue #59) + // TODO(bcipriano) Add error handling. 
(Issue #59)
-    @Override
-    public void create(FacilityCreateRequest request, StreamObserver<FacilityCreateResponse> responseObserver) {
-        adminManager.createFacility(request.getName());
-        FacilityCreateResponse response = FacilityCreateResponse.newBuilder()
-                .setFacility(whiteboard.getFacility(request.getName()))
-                .build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
+  @Override
+  public void create(FacilityCreateRequest request,
+      StreamObserver<FacilityCreateResponse> responseObserver) {
+    adminManager.createFacility(request.getName());
+    FacilityCreateResponse response = FacilityCreateResponse.newBuilder()
+        .setFacility(whiteboard.getFacility(request.getName())).build();
+    responseObserver.onNext(response);
+    responseObserver.onCompleted();
+  }
-    @Override
-    public void get(FacilityGetRequest request, StreamObserver<FacilityGetResponse> responseObserver) {
-        try {
-            FacilityGetResponse response = FacilityGetResponse.newBuilder()
-                    .setFacility(whiteboard.getFacility(request.getName()))
-                    .build();
-            responseObserver.onNext(response);
-            responseObserver.onCompleted();
-        } catch (EmptyResultDataAccessException e) {
-            responseObserver.onError(Status.NOT_FOUND
-                    .withDescription(e.getMessage())
-                    .withCause(e)
-                    .asRuntimeException());
-        }
+  @Override
+  public void get(FacilityGetRequest request,
+      StreamObserver<FacilityGetResponse> responseObserver) {
+    try {
+      FacilityGetResponse response = FacilityGetResponse.newBuilder()
+          .setFacility(whiteboard.getFacility(request.getName())).build();
+      responseObserver.onNext(response);
+      responseObserver.onCompleted();
+    } catch (EmptyResultDataAccessException e) {
+      responseObserver.onError(
+          Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException());
 }
+  }
-    @Override
-    public void rename(FacilityRenameRequest request, StreamObserver<FacilityRenameResponse> responseObserver) {
-        adminManager.setFacilityName(
-                adminManager.getFacility(request.getFacility().getName()),
-                request.getNewName());
-        responseObserver.onNext(FacilityRenameResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
+  @Override
+  public void rename(FacilityRenameRequest request,
+      StreamObserver<FacilityRenameResponse> responseObserver) {
+    adminManager.setFacilityName(adminManager.getFacility(request.getFacility().getName()),
+        request.getNewName());
+    responseObserver.onNext(FacilityRenameResponse.newBuilder().build());
+    responseObserver.onCompleted();
+  }
-    @Override
-    public void delete(FacilityDeleteRequest request, StreamObserver<FacilityDeleteResponse> responseObserver) {
-        adminManager.deleteFacility(adminManager.getFacility(request.getName()));
-        responseObserver.onNext(FacilityDeleteResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
+  @Override
+  public void delete(FacilityDeleteRequest request,
+      StreamObserver<FacilityDeleteResponse> responseObserver) {
+    adminManager.deleteFacility(adminManager.getFacility(request.getName()));
+    responseObserver.onNext(FacilityDeleteResponse.newBuilder().build());
+    responseObserver.onCompleted();
+  }
-    public AdminManager getAdminManager() {
-        return adminManager;
-    }
+  public AdminManager getAdminManager() {
+    return adminManager;
+  }
-    public void setAdminManager(AdminManager adminManager) {
-        this.adminManager = adminManager;
-    }
+  public void setAdminManager(AdminManager adminManager) {
+    this.adminManager = adminManager;
+  }
-    public Whiteboard getWhiteboard() {
-        return whiteboard;
-    }
+  public Whiteboard getWhiteboard() {
+    return whiteboard;
+  }
-    public void setWhiteboard(Whiteboard whiteboard) {
-        this.whiteboard = whiteboard;
-    }
+  public void setWhiteboard(Whiteboard whiteboard) {
+    this.whiteboard =
whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java index 8749a5787..4ea6beb38 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import java.util.List; @@ -76,224 +72,230 @@ public class ManageFilter extends FilterInterfaceGrpc.FilterInterfaceImplBase { - private Whiteboard whiteboard; - private FilterManager filterManager; - private FilterDao filterDao; - private GroupDao groupDao; - private DispatchQueue manageQueue; - - @Override - public void findFilter(FilterFindFilterRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(FilterFindFilterResponse.newBuilder() - .setFilter(whiteboard.findFilter(request.getShow(), request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void createAction(FilterCreateActionRequest request, - StreamObserver responseObserver) { - ActionEntity actionDetail = ActionEntity.build(getFilterEntity(request.getFilter()), request.getData()); - filterManager.createAction(actionDetail); - Action action = whiteboard.getAction(actionDetail); - FilterCreateActionResponse response = FilterCreateActionResponse.newBuilder().setAction(action).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void createMatcher(FilterCreateMatcherRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - MatcherEntity matcherDetail = MatcherEntity.build(filter, request.getData()); - matcherDetail.filterId = filter.id; - filterManager.createMatcher(matcherDetail); - Matcher newMatcher = whiteboard.getMatcher(matcherDetail); - FilterCreateMatcherResponse response = FilterCreateMatcherResponse.newBuilder() - .setMatcher(newMatcher) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void delete(FilterDeleteRequest request, StreamObserver responseObserver) { - FilterEntity 
filter = getFilterEntity(request.getFilter()); - String key = "manage_filter_del_req_" + filter.getId(); - manageQueue.execute(new KeyRunnable(key) { - public void run() { - filterManager.deleteFilter(filter); - } - }); - responseObserver.onNext(FilterDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getActions(FilterGetActionsRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - FilterGetActionsResponse response = FilterGetActionsResponse.newBuilder() - .setActions(whiteboard.getActions(filter)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getMatchers(FilterGetMatchersRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - FilterGetMatchersResponse response = FilterGetMatchersResponse.newBuilder() - .setMatchers(whiteboard.getMatchers(filter)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void lowerOrder(FilterLowerOrderRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.lowerFilterOrder(filter); - responseObserver.onNext(FilterLowerOrderResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void raiseOrder(FilterRaiseOrderRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.raiseFilterOrder(filter); - responseObserver.onNext(FilterRaiseOrderResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void orderFirst(FilterOrderFirstRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.setFilterOrder(filter, 0); - responseObserver.onNext(FilterOrderFirstResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void orderLast(FilterOrderLastRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.setFilterOrder(filter, 9999); - responseObserver.onNext(FilterOrderLastResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void runFilterOnGroup(FilterRunFilterOnGroupRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.runFilterOnGroup(filter, groupDao.getGroup(request.getGroup().getId())); - responseObserver.onNext(FilterRunFilterOnGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setEnabled(FilterSetEnabledRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterDao.updateSetFilterEnabled(filter, request.getEnabled()); - responseObserver.onNext(FilterSetEnabledResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setName(FilterSetNameRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterDao.updateSetFilterName(filter, request.getName()); - responseObserver.onNext(FilterSetNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setType(FilterSetTypeRequest request, StreamObserver responseObserver) { 
- FilterEntity filter = getFilterEntity(request.getFilter()); - filterDao.updateSetFilterType(filter, request.getType()); - responseObserver.onNext(FilterSetTypeResponse.newBuilder().build()); - responseObserver.onCompleted(); + private Whiteboard whiteboard; + private FilterManager filterManager; + private FilterDao filterDao; + private GroupDao groupDao; + private DispatchQueue manageQueue; + + @Override + public void findFilter(FilterFindFilterRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(FilterFindFilterResponse.newBuilder() + .setFilter(whiteboard.findFilter(request.getShow(), request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } - - public void setOrder(FilterSetOrderRequest request, StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.setFilterOrder(filter, (double) request.getOrder()); - responseObserver.onNext(FilterSetOrderResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public void runFilterOnJobs(FilterRunFilterOnJobsRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - for (Job job: request.getJobs().getJobsList()) { - filterManager.runFilterOnJob(filter, job.getId()); - } - responseObserver.onNext(FilterRunFilterOnJobsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public FilterDao getFilterDao() { - return filterDao; - } - - public void setFilterDao(FilterDao filterDao) { - this.filterDao = filterDao; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private FilterEntity getFilterEntity(Filter filter) { - return filterManager.getFilter(filter.getId()); - } - - private ActionEntity toActionEntity(Action action) { - ActionEntity entity = new ActionEntity(); - entity.id = action.getId(); - entity.type = action.getType(); - entity.valueType = action.getValueType(); - entity.groupValue = action.getGroupValue(); - entity.stringValue = action.getStringValue(); - entity.intValue = action.getIntegerValue(); - entity.floatValue = action.getFloatValue(); - entity.booleanValue = action.getBooleanValue(); - return entity; + } + + @Override + public void createAction(FilterCreateActionRequest request, + StreamObserver responseObserver) { + ActionEntity actionDetail = + ActionEntity.build(getFilterEntity(request.getFilter()), request.getData()); + filterManager.createAction(actionDetail); + Action action = whiteboard.getAction(actionDetail); + FilterCreateActionResponse response = + FilterCreateActionResponse.newBuilder().setAction(action).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void createMatcher(FilterCreateMatcherRequest request, + StreamObserver 
responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + MatcherEntity matcherDetail = MatcherEntity.build(filter, request.getData()); + matcherDetail.filterId = filter.id; + filterManager.createMatcher(matcherDetail); + Matcher newMatcher = whiteboard.getMatcher(matcherDetail); + FilterCreateMatcherResponse response = + FilterCreateMatcherResponse.newBuilder().setMatcher(newMatcher).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void delete(FilterDeleteRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + String key = "manage_filter_del_req_" + filter.getId(); + manageQueue.execute(new KeyRunnable(key) { + public void run() { + filterManager.deleteFilter(filter); + } + }); + responseObserver.onNext(FilterDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getActions(FilterGetActionsRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + FilterGetActionsResponse response = + FilterGetActionsResponse.newBuilder().setActions(whiteboard.getActions(filter)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getMatchers(FilterGetMatchersRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + FilterGetMatchersResponse response = + FilterGetMatchersResponse.newBuilder().setMatchers(whiteboard.getMatchers(filter)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void lowerOrder(FilterLowerOrderRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.lowerFilterOrder(filter); + responseObserver.onNext(FilterLowerOrderResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void raiseOrder(FilterRaiseOrderRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.raiseFilterOrder(filter); + responseObserver.onNext(FilterRaiseOrderResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void orderFirst(FilterOrderFirstRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.setFilterOrder(filter, 0); + responseObserver.onNext(FilterOrderFirstResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void orderLast(FilterOrderLastRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.setFilterOrder(filter, 9999); + responseObserver.onNext(FilterOrderLastResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void runFilterOnGroup(FilterRunFilterOnGroupRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.runFilterOnGroup(filter, groupDao.getGroup(request.getGroup().getId())); + responseObserver.onNext(FilterRunFilterOnGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setEnabled(FilterSetEnabledRequest request, + StreamObserver responseObserver) { + FilterEntity filter = 
getFilterEntity(request.getFilter()); + filterDao.updateSetFilterEnabled(filter, request.getEnabled()); + responseObserver.onNext(FilterSetEnabledResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setName(FilterSetNameRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterDao.updateSetFilterName(filter, request.getName()); + responseObserver.onNext(FilterSetNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setType(FilterSetTypeRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterDao.updateSetFilterType(filter, request.getType()); + responseObserver.onNext(FilterSetTypeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void setOrder(FilterSetOrderRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.setFilterOrder(filter, (double) request.getOrder()); + responseObserver.onNext(FilterSetOrderResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void runFilterOnJobs(FilterRunFilterOnJobsRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + for (Job job : request.getJobs().getJobsList()) { + filterManager.runFilterOnJob(filter, job.getId()); } + responseObserver.onNext(FilterRunFilterOnJobsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public FilterDao getFilterDao() { + return filterDao; + } + + public void setFilterDao(FilterDao filterDao) { + this.filterDao = filterDao; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private FilterEntity getFilterEntity(Filter filter) { + return filterManager.getFilter(filter.getId()); + } + + private ActionEntity toActionEntity(Action action) { + ActionEntity entity = new ActionEntity(); + entity.id = action.getId(); + entity.type = action.getType(); + entity.valueType = action.getValueType(); + entity.groupValue = action.getGroupValue(); + entity.stringValue = action.getStringValue(); + entity.intValue = action.getIntegerValue(); + entity.floatValue = action.getFloatValue(); + entity.booleanValue = action.getBooleanValue(); + return entity; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java index 7ef91a2e9..d26ef635d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import io.grpc.Status; @@ -87,345 +83,323 @@ public class ManageFrame extends FrameInterfaceGrpc.FrameInterfaceImplBase { - private JobManager jobManager; - private DependManager dependManager; - private JobManagerSupport jobManagerSupport; - private FrameDao frameDao; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - private LocalBookingSupport localBookingSupport; - private FrameSearchFactory frameSearchFactory; - - @Override - public void findFrame(FrameFindFrameRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(FrameFindFrameResponse.newBuilder() - .setFrame(whiteboard.findFrame(request.getJob(), request.getLayer(), request.getFrame())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getFrame(FrameGetFrameRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(FrameGetFrameResponse.newBuilder() - .setFrame(whiteboard.getFrame(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getFrames(FrameGetFramesRequest request, StreamObserver responseObserver) { - responseObserver.onNext(FrameGetFramesResponse.newBuilder() - .setFrames( - whiteboard.getFrames( - frameSearchFactory.create( - jobManagerSupport.getJobManager().findJob(request.getJob()), - request.getR()))) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void eat(FrameEatRequest request, StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute( - new DispatchEatFrames( - frameSearchFactory.create(frame), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(FrameEatResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void kill(FrameKillRequest request, StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute( - new DispatchKillFrames( - frameSearchFactory.create(frame), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(FrameKillResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - 
@Override - public void retry(FrameRetryRequest request, StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute( - new DispatchRetryFrames( - frameSearchFactory.create(frame), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(FrameRetryResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void createDependencyOnFrame(FrameCreateDependencyOnFrameRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - FrameOnFrame depend = new FrameOnFrame(frame, - jobManager.getFrameDetail(request.getDependOnFrame().getId())); - dependManager.createDepend(depend); - Depend dependency = whiteboard.getDepend(depend); - responseObserver.onNext(FrameCreateDependencyOnFrameResponse.newBuilder() - .setDepend(dependency) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void createDependencyOnJob(FrameCreateDependencyOnJobRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - FrameOnJob depend = new FrameOnJob(frame, jobManager.getJobDetail(request.getJob().getId())); - dependManager.createDepend(depend); - Depend dependency = whiteboard.getDepend(depend); - responseObserver.onNext(FrameCreateDependencyOnJobResponse.newBuilder() - .setDepend(dependency) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void createDependencyOnLayer(FrameCreateDependencyOnLayerRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - FrameOnLayer depend = new FrameOnLayer(frame, jobManager.getLayerDetail(request.getLayer().getId())); - dependManager.createDepend(depend); - Depend dependency = whiteboard.getDepend(depend); - responseObserver.onNext(FrameCreateDependencyOnLayerResponse.newBuilder() - .setDepend(dependency) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getWhatDependsOnThis(FrameGetWhatDependsOnThisRequest request, - StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - responseObserver.onNext(FrameGetWhatDependsOnThisResponse.newBuilder() - .setDepends(whiteboard.getWhatDependsOnThis(frame)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getWhatThisDependsOn(FrameGetWhatThisDependsOnRequest request, - StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - responseObserver.onNext(FrameGetWhatThisDependsOnResponse.newBuilder() - .setDepends(whiteboard.getWhatThisDependsOn(frame)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void markAsDepend(FrameMarkAsDependRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - jobManager.markFrameAsDepend(frame); - responseObserver.onNext(FrameMarkAsDependResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void markAsWaiting(FrameMarkAsWaitingRequest request, StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - jobManager.markFrameAsWaiting(frame); - responseObserver.onNext(FrameMarkAsWaitingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void dropDepends(FrameDropDependsRequest 
request, StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute(new DispatchDropDepends(frame, request.getTarget(), dependManager)); - responseObserver.onNext(FrameDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addRenderPartition(FrameAddRenderPartitionRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setFrameId(frame.id); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpuMemory(request.getMaxGpuMemory()); - lha.setType(RenderPartitionType.FRAME_PARTITION); - - if (localBookingSupport.bookLocal(frame, request.getHost(), request.getUsername(), lha)) { - RenderPartition partition = whiteboard.getRenderPartition(lha); - - responseObserver.onNext(FrameAddRenderPartitionResponse.newBuilder() - .setRenderPartition(partition) - .build()); - responseObserver.onCompleted(); - } else { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find suitable frames.") - .augmentDescription("customException()") - .asRuntimeException()); - } - } - - @Override - public void setCheckpointState(FrameSetCheckpointStateRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - jobManager.updateCheckpointState(frame, request.getState()); - responseObserver.onNext(FrameSetCheckpointStateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setFrameStateDisplayOverride(FrameStateDisplayOverrideRequest request, - StreamObserver responseObserver){ - updateManagers(); - Frame frame = request.getFrame(); - FrameStateDisplayOverride override = request.getOverride(); - - FrameStateDisplayOverrideSeq existing_overrides = frameDao.getFrameStateDisplayOverrides( - frame.getId()); - // if override already exists, do nothing - // if override is for a state that already has an override but diff color/text, update - // if override is new, add - boolean newOverride = true; - for (FrameStateDisplayOverride eo : existing_overrides.getOverridesList()) { - if (eo.equals(override)) { - newOverride = false; - break; - } - else if (eo.getState().equals(override.getState()) && - !(eo.getColor().equals(override.getColor()) && - eo.getText().equals(override.getText()))) { - newOverride = false; - frameDao.updateFrameStateDisplayOverride(frame.getId(), override); - break; - } - } - - if (newOverride) { - frameDao.setFrameStateDisplayOverride(frame.getId(), override); - } - responseObserver.onNext(FrameStateDisplayOverrideResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getFrameStateDisplayOverrides(GetFrameStateDisplayOverridesRequest request, - StreamObserver responseObserver){ - try { - updateManagers(); - Frame frame = request.getFrame(); - responseObserver.onNext(GetFrameStateDisplayOverridesResponse.newBuilder() - .setOverrides(frameDao.getFrameStateDisplayOverrides(frame.getId())) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("No Frame State display overrides found.") - .asRuntimeException()); - } - } - - public JobManager getJobManager() { - return jobManager; 
- } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue dispatchQueue) { - this.manageQueue = dispatchQueue; - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } - - private FrameEntity getFrameEntity(Frame frame) { - return frameDao.getFrameDetail(frame.getId()); - } - - private void updateManagers() { - setDependManager(jobManagerSupport.getDependManager()); - setJobManager(jobManagerSupport.getJobManager()); - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } + private JobManager jobManager; + private DependManager dependManager; + private JobManagerSupport jobManagerSupport; + private FrameDao frameDao; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + private LocalBookingSupport localBookingSupport; + private FrameSearchFactory frameSearchFactory; + + @Override + public void findFrame(FrameFindFrameRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(FrameFindFrameResponse.newBuilder() + .setFrame(whiteboard.findFrame(request.getJob(), request.getLayer(), request.getFrame())) + .build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void getFrame(FrameGetFrameRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(FrameGetFrameResponse.newBuilder() + .setFrame(whiteboard.getFrame(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void getFrames(FrameGetFramesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(FrameGetFramesResponse.newBuilder() + .setFrames(whiteboard.getFrames(frameSearchFactory + .create(jobManagerSupport.getJobManager().findJob(request.getJob()), request.getR()))) + .build()); + responseObserver.onCompleted(); + } + + @Override + public void eat(FrameEatRequest request, StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchEatFrames(frameSearchFactory.create(frame), + new Source(request.toString()), jobManagerSupport)); + 
responseObserver.onNext(FrameEatResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void kill(FrameKillRequest request, StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchKillFrames(frameSearchFactory.create(frame), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(FrameKillResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void retry(FrameRetryRequest request, + StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchRetryFrames(frameSearchFactory.create(frame), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(FrameRetryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void createDependencyOnFrame(FrameCreateDependencyOnFrameRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + FrameOnFrame depend = + new FrameOnFrame(frame, jobManager.getFrameDetail(request.getDependOnFrame().getId())); + dependManager.createDepend(depend); + Depend dependency = whiteboard.getDepend(depend); + responseObserver + .onNext(FrameCreateDependencyOnFrameResponse.newBuilder().setDepend(dependency).build()); + responseObserver.onCompleted(); + } + + @Override + public void createDependencyOnJob(FrameCreateDependencyOnJobRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + FrameOnJob depend = new FrameOnJob(frame, jobManager.getJobDetail(request.getJob().getId())); + dependManager.createDepend(depend); + Depend dependency = whiteboard.getDepend(depend); + responseObserver + .onNext(FrameCreateDependencyOnJobResponse.newBuilder().setDepend(dependency).build()); + responseObserver.onCompleted(); + } + + @Override + public void createDependencyOnLayer(FrameCreateDependencyOnLayerRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + FrameOnLayer depend = + new FrameOnLayer(frame, jobManager.getLayerDetail(request.getLayer().getId())); + dependManager.createDepend(depend); + Depend dependency = whiteboard.getDepend(depend); + responseObserver + .onNext(FrameCreateDependencyOnLayerResponse.newBuilder().setDepend(dependency).build()); + responseObserver.onCompleted(); + } + + @Override + public void getWhatDependsOnThis(FrameGetWhatDependsOnThisRequest request, + StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + responseObserver.onNext(FrameGetWhatDependsOnThisResponse.newBuilder() + .setDepends(whiteboard.getWhatDependsOnThis(frame)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getWhatThisDependsOn(FrameGetWhatThisDependsOnRequest request, + StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + responseObserver.onNext(FrameGetWhatThisDependsOnResponse.newBuilder() + .setDepends(whiteboard.getWhatThisDependsOn(frame)).build()); + responseObserver.onCompleted(); + } + + @Override + public void markAsDepend(FrameMarkAsDependRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + jobManager.markFrameAsDepend(frame); + 
responseObserver.onNext(FrameMarkAsDependResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void markAsWaiting(FrameMarkAsWaitingRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + jobManager.markFrameAsWaiting(frame); + responseObserver.onNext(FrameMarkAsWaitingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void dropDepends(FrameDropDependsRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchDropDepends(frame, request.getTarget(), dependManager)); + responseObserver.onNext(FrameDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addRenderPartition(FrameAddRenderPartitionRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setFrameId(frame.id); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.FRAME_PARTITION); + + if (localBookingSupport.bookLocal(frame, request.getHost(), request.getUsername(), lha)) { + RenderPartition partition = whiteboard.getRenderPartition(lha); + + responseObserver.onNext( + FrameAddRenderPartitionResponse.newBuilder().setRenderPartition(partition).build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find suitable frames.") + .augmentDescription("customException()").asRuntimeException()); + } + } + + @Override + public void setCheckpointState(FrameSetCheckpointStateRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + jobManager.updateCheckpointState(frame, request.getState()); + responseObserver.onNext(FrameSetCheckpointStateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setFrameStateDisplayOverride(FrameStateDisplayOverrideRequest request, + StreamObserver responseObserver) { + updateManagers(); + Frame frame = request.getFrame(); + FrameStateDisplayOverride override = request.getOverride(); + + FrameStateDisplayOverrideSeq existing_overrides = + frameDao.getFrameStateDisplayOverrides(frame.getId()); + // if override already exists, do nothing + // if override is for a state that already has an override but diff color/text, + // update + // if override is new, add + boolean newOverride = true; + for (FrameStateDisplayOverride eo : existing_overrides.getOverridesList()) { + if (eo.equals(override)) { + newOverride = false; + break; + } else if (eo.getState().equals(override.getState()) + && !(eo.getColor().equals(override.getColor()) + && eo.getText().equals(override.getText()))) { + newOverride = false; + frameDao.updateFrameStateDisplayOverride(frame.getId(), override); + break; + } + } + + if (newOverride) { + frameDao.setFrameStateDisplayOverride(frame.getId(), override); + } + responseObserver.onNext(FrameStateDisplayOverrideResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getFrameStateDisplayOverrides(GetFrameStateDisplayOverridesRequest request, + StreamObserver 
responseObserver) { + try { + updateManagers(); + Frame frame = request.getFrame(); + responseObserver.onNext(GetFrameStateDisplayOverridesResponse.newBuilder() + .setOverrides(frameDao.getFrameStateDisplayOverrides(frame.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL + .withDescription("No Frame State display overrides found.").asRuntimeException()); + } + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue dispatchQueue) { + this.manageQueue = dispatchQueue; + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } + + private FrameEntity getFrameEntity(Frame frame) { + return frameDao.getFrameDetail(frame.getId()); + } + + private void updateManagers() { + setDependManager(jobManagerSupport.getDependManager()); + setJobManager(jobManagerSupport.getJobManager()); + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java index b8f3cd43e..e9ed05324 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import java.util.ArrayList; @@ -84,278 +80,274 @@ public class ManageGroup extends GroupInterfaceGrpc.GroupInterfaceImplBase { - private GroupDao groupDao; - private JobDao jobDao; - private GroupManager groupManager; - private AdminManager adminManager; - private Whiteboard whiteboard; - private DispatchQueue manageQueue; - - @Override - public void getGroup(GroupGetGroupRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(GroupGetGroupResponse.newBuilder() - .setGroup(whiteboard.getGroup(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void findGroup(GroupFindGroupRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(GroupFindGroupResponse.newBuilder() - .setGroup(whiteboard.findGroup(request.getShow(), request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void reparentGroups(GroupReparentGroupsRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupSeq groupSeq = request.getGroups(); - List groupIds = new ArrayList(groupSeq.getGroupsCount()); - for (Group g: groupSeq.getGroupsList()) { - groupIds.add(g.getId()); - } - groupManager.reparentGroupIds(group, groupIds); - responseObserver.onNext(GroupReparentGroupsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void reparentJobs(GroupReparentJobsRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - final GroupDetail gDetail = groupDao.getGroupDetail(group.getId()); - for (Job job: request.getJobs().getJobsList()) { - groupManager.reparentJob( - jobDao.getJob(job.getId()), - gDetail, - new Inherit[] { Inherit.All }); - } - responseObserver.onNext(GroupReparentJobsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void createSubGroup(GroupCreateSubGroupRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupDetail newGroup = new GroupDetail(); - newGroup.name = request.getName(); - newGroup.parentId = group.getId(); - newGroup.showId = group.getShowId(); - groupManager.createGroup(newGroup, group); - Group subgroup = whiteboard.getGroup(newGroup.id); - responseObserver.onNext(GroupCreateSubGroupResponse.newBuilder() - .setGroup(subgroup) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void delete(GroupDeleteRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - try { - groupManager.deleteGroup(group); - } catch (Exception e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to remove group, be sure that there 
are no " + - "jobs or filter actions pointing at the group.") - .withCause(e) - .asRuntimeException()); - } - responseObserver.onNext(GroupDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMaxCores(GroupSetDefJobMaxCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMaxCores(group, Convert.coresToWholeCoreUnits(request.getMaxCores())); - responseObserver.onNext(GroupSetDefJobMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMinCores(GroupSetDefJobMinCoresRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMinCores(group, Convert.coresToWholeCoreUnits(request.getMinCores())); - responseObserver.onNext(GroupSetDefJobMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMaxGpus(GroupSetDefJobMaxGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMaxGpus(group, request.getMaxGpus()); - responseObserver.onNext(GroupSetDefJobMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMinGpus(GroupSetDefJobMinGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMinGpus(group, request.getMinGpus()); - responseObserver.onNext(GroupSetDefJobMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setName(GroupSetNameRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupDao.updateName(group, request.getName()); - responseObserver.onNext(GroupSetNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setGroup(GroupSetGroupRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupInterface parentGroup = groupDao.getGroup(request.getParentGroup().getId()); - groupManager.setGroupParent(group, groupDao.getGroupDetail(parentGroup.getGroupId())); - responseObserver.onNext(GroupSetGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDepartment(GroupSetDeptRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDepartment(group, adminManager.findDepartment(request.getDept())); - responseObserver.onNext(GroupSetDeptResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobPriority(GroupSetDefJobPriorityRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobPriority(group, request.getPriority()); - responseObserver.onNext(GroupSetDefJobPriorityResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getGroups(GroupGetGroupsRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupSeq groupSeq = whiteboard.getGroups(group); - 
responseObserver.onNext(GroupGetGroupsResponse.newBuilder() - .setGroups(groupSeq) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getJobs(GroupGetJobsRequest request, StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - JobSeq jobSeq = whiteboard.getJobs(group); - responseObserver.onNext(GroupGetJobsResponse.newBuilder() - .setJobs(jobSeq) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void setMaxCores(GroupSetMaxCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMaxCores(group, - Convert.coresToWholeCoreUnits(request.getMaxCores())); - responseObserver.onNext(GroupSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMinCores(GroupSetMinCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMinCores(group, - Convert.coresToWholeCoreUnits(request.getMinCores())); - responseObserver.onNext(GroupSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMaxGpus(GroupSetMaxGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMaxGpus(group, request.getMaxGpus()); - responseObserver.onNext(GroupSetMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMinGpus(GroupSetMinGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMinGpus(group, request.getMinGpus()); - responseObserver.onNext(GroupSetMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public JobDao getJobDao() { - return jobDao; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - private GroupInterface getGroupInterface(Group group) { - return groupDao.getGroup(group.getId()); - } + private GroupDao groupDao; + private JobDao jobDao; + private GroupManager groupManager; + private AdminManager adminManager; + private Whiteboard whiteboard; + private DispatchQueue manageQueue; + + @Override + public void getGroup(GroupGetGroupRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(GroupGetGroupResponse.newBuilder() + .setGroup(whiteboard.getGroup(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + 
Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } +
+ @Override + public void findGroup(GroupFindGroupRequest request, + StreamObserver<GroupFindGroupResponse> responseObserver) { + try { + responseObserver.onNext(GroupFindGroupResponse.newBuilder() + .setGroup(whiteboard.findGroup(request.getShow(), request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } +
+ @Override + public void reparentGroups(GroupReparentGroupsRequest request, + StreamObserver<GroupReparentGroupsResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupSeq groupSeq = request.getGroups(); + List<String> groupIds = new ArrayList<String>(groupSeq.getGroupsCount()); + for (Group g : groupSeq.getGroupsList()) { + groupIds.add(g.getId()); + } + groupManager.reparentGroupIds(group, groupIds); + responseObserver.onNext(GroupReparentGroupsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void reparentJobs(GroupReparentJobsRequest request, + StreamObserver<GroupReparentJobsResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + final GroupDetail gDetail = groupDao.getGroupDetail(group.getId()); + for (Job job : request.getJobs().getJobsList()) { + groupManager.reparentJob(jobDao.getJob(job.getId()), gDetail, new Inherit[] {Inherit.All}); + } + responseObserver.onNext(GroupReparentJobsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void createSubGroup(GroupCreateSubGroupRequest request, + StreamObserver<GroupCreateSubGroupResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupDetail newGroup = new GroupDetail(); + newGroup.name = request.getName(); + newGroup.parentId = group.getId(); + newGroup.showId = group.getShowId(); + groupManager.createGroup(newGroup, group); + Group subgroup = whiteboard.getGroup(newGroup.id); + responseObserver.onNext(GroupCreateSubGroupResponse.newBuilder().setGroup(subgroup).build()); + responseObserver.onCompleted(); + } +
+ @Override + public void delete(GroupDeleteRequest request, + StreamObserver<GroupDeleteResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + try { + groupManager.deleteGroup(group); + } catch (Exception e) { + responseObserver + .onError(Status.INTERNAL + .withDescription("Failed to remove group, be sure that there are no " + + "jobs or filter actions pointing at the group.") + .withCause(e).asRuntimeException()); + } + responseObserver.onNext(GroupDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setDefaultJobMaxCores(GroupSetDefJobMaxCoresRequest request, + StreamObserver<GroupSetDefJobMaxCoresResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMaxCores(group, + Convert.coresToWholeCoreUnits(request.getMaxCores())); + responseObserver.onNext(GroupSetDefJobMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setDefaultJobMinCores(GroupSetDefJobMinCoresRequest request, + StreamObserver<GroupSetDefJobMinCoresResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMinCores(group, + Convert.coresToWholeCoreUnits(request.getMinCores())); + responseObserver.onNext(GroupSetDefJobMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setDefaultJobMaxGpus(GroupSetDefJobMaxGpusRequest request, + StreamObserver<GroupSetDefJobMaxGpusResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMaxGpus(group, request.getMaxGpus()); + responseObserver.onNext(GroupSetDefJobMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setDefaultJobMinGpus(GroupSetDefJobMinGpusRequest request, + StreamObserver<GroupSetDefJobMinGpusResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMinGpus(group, request.getMinGpus()); + responseObserver.onNext(GroupSetDefJobMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setName(GroupSetNameRequest request, + StreamObserver<GroupSetNameResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupDao.updateName(group, request.getName()); + responseObserver.onNext(GroupSetNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setGroup(GroupSetGroupRequest request, + StreamObserver<GroupSetGroupResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupInterface parentGroup = groupDao.getGroup(request.getParentGroup().getId()); + groupManager.setGroupParent(group, groupDao.getGroupDetail(parentGroup.getGroupId())); + responseObserver.onNext(GroupSetGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setDepartment(GroupSetDeptRequest request, + StreamObserver<GroupSetDeptResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDepartment(group, adminManager.findDepartment(request.getDept())); + responseObserver.onNext(GroupSetDeptResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setDefaultJobPriority(GroupSetDefJobPriorityRequest request, + StreamObserver<GroupSetDefJobPriorityResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobPriority(group, request.getPriority()); + responseObserver.onNext(GroupSetDefJobPriorityResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void getGroups(GroupGetGroupsRequest request, + StreamObserver<GroupGetGroupsResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupSeq groupSeq = whiteboard.getGroups(group); + responseObserver.onNext(GroupGetGroupsResponse.newBuilder().setGroups(groupSeq).build()); + responseObserver.onCompleted(); + } +
+ @Override + public void getJobs(GroupGetJobsRequest request, + StreamObserver<GroupGetJobsResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + JobSeq jobSeq = whiteboard.getJobs(group); + responseObserver.onNext(GroupGetJobsResponse.newBuilder().setJobs(jobSeq).build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setMaxCores(GroupSetMaxCoresRequest request, + StreamObserver<GroupSetMaxCoresResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMaxCores(group, Convert.coresToWholeCoreUnits(request.getMaxCores())); + responseObserver.onNext(GroupSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } +
+ @Override + public void setMinCores(GroupSetMinCoresRequest request, + StreamObserver<GroupSetMinCoresResponse> responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMinCores(group,
Convert.coresToWholeCoreUnits(request.getMinCores())); + responseObserver.onNext(GroupSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMaxGpus(GroupSetMaxGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMaxGpus(group, request.getMaxGpus()); + responseObserver.onNext(GroupSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMinGpus(GroupSetMinGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMinGpus(group, request.getMinGpus()); + responseObserver.onNext(GroupSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public JobDao getJobDao() { + return jobDao; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + private GroupInterface getGroupInterface(Group group) { + return groupDao.getGroup(group.getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java index 7728c4cb1..38ed6e1a1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.servant; import java.util.ArrayList; @@ -93,308 +89,302 @@ public class ManageHost extends HostInterfaceGrpc.HostInterfaceImplBase { - private HostManager hostManager; - private HostDao hostDao; - private AdminManager adminManager; - private CommentManager commentManager; - private RedirectManager redirectManager; - private JobManager jobManager; - private Whiteboard whiteboard; - private HostSearchFactory hostSearchFactory; - - @Override - public void getHosts(HostGetHostsRequest request, StreamObserver responseObserver) { - responseObserver.onNext(HostGetHostsResponse.newBuilder() - .setHosts(whiteboard.getHosts(hostSearchFactory.create(request.getR()))) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getHostWhiteboard(HostGetHostWhiteboardRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(HostGetHostWhiteboardResponse.newBuilder() - .setNestedHosts(whiteboard.getHostWhiteboard()) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void findHost(HostFindHostRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(HostFindHostResponse.newBuilder() - .setHost(whiteboard.findHost(request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getHost(HostGetHostRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(HostGetHostResponse.newBuilder() - .setHost(whiteboard.getHost(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void lock(HostLockRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.setHostLock(host, LockState.LOCKED, new Source("HostApi")); - responseObserver.onNext(HostLockResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void unlock(HostUnlockRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.setHostLock(host, LockState.OPEN, new Source("HostApi")); - responseObserver.onNext(HostUnlockResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void rebootWhenIdle(HostRebootWhenIdleRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.rebootWhenIdle(host); - responseObserver.onNext(HostRebootWhenIdleResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void delete(HostDeleteRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.deleteHost(host); - responseObserver.onNext(HostDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void reboot(HostRebootRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.rebootNow(host); - responseObserver.onNext(HostRebootResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void 
setAllocation(HostSetAllocationRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.setAllocation(host, - adminManager.getAllocationDetail(request.getAllocationId())); - responseObserver.onNext(HostSetAllocationResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addTags(HostAddTagsRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.addTags(host, request.getTagsList().toArray(new String[0])); - responseObserver.onNext(HostAddTagsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void removeTags(HostRemoveTagsRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.removeTags(host, request.getTagsList().toArray(new String[0])); - responseObserver.onNext(HostRemoveTagsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void renameTag(HostRenameTagRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.renameTag(host, request.getOldTag(), request.getNewTag()); - responseObserver.onNext(HostRenameTagResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addComment(HostAddCommentRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - CommentDetail c = new CommentDetail(); - Comment newComment = request.getNewComment(); - c.message = newComment.getMessage(); - c.subject = newComment.getSubject(); - c.user = newComment.getUser(); - c.timestamp = null; - commentManager.addComment(host, c); - responseObserver.onNext(HostAddCommentResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getComments(HostGetCommentsRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - CommentSeq commentSeq = whiteboard.getComments(host); - responseObserver.onNext(HostGetCommentsResponse.newBuilder() - .setComments(commentSeq) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getProcs(HostGetProcsRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - ProcSeq procs = whiteboard.getProcs(host); - responseObserver.onNext(HostGetProcsResponse.newBuilder() - .setProcs(procs) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void setThreadMode(HostSetThreadModeRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostDao.updateThreadMode(host, request.getMode()); - responseObserver.onNext(HostSetThreadModeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setHardwareState(HostSetHardwareStateRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostDao.updateHostState(host, request.getState()); - responseObserver.onNext(HostSetHardwareStateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getOwner(HostGetOwnerRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - responseObserver.onNext(HostGetOwnerResponse.newBuilder() - 
.setOwner(whiteboard.getOwner(host)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getDeed(HostGetDeedRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - responseObserver.onNext(HostGetDeedResponse.newBuilder() - .setDeed(whiteboard.getDeed(host)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getRenderPartitions(HostGetRenderPartitionsRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - responseObserver.onNext(HostGetRenderPartitionsResponse.newBuilder() - .setRenderPartitions(whiteboard.getRenderPartitions(host)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void redirectToJob(HostRedirectToJobRequest request, StreamObserver responseObserver) { - - List virtualProcs = new ArrayList<>(); - for (String procName: request.getProcNamesList()) { - virtualProcs.add(hostManager.getVirtualProc(procName)); - } - boolean value = redirectManager.addRedirect(virtualProcs, - jobManager.getJob(request.getJobId()), - new Source(request.toString())); - responseObserver.onNext(HostRedirectToJobResponse.newBuilder() - .setValue(value) - .build()); - responseObserver.onCompleted(); - } - - - @Override - public void setOs(HostSetOsRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostDao.updateHostOs(host, request.getOs()); - responseObserver.onNext(HostSetOsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public CommentManager getCommentManager() { - return commentManager; - } - - public void setCommentManager(CommentManager commentManager) { - this.commentManager = commentManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public HostSearchFactory getHostSearchFactory() { - return hostSearchFactory; - } - - public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { - this.hostSearchFactory = hostSearchFactory; - } - - private HostInterface getHostInterface(Host host) { - return hostManager.getHost(host.getId()); - } + private HostManager hostManager; + private HostDao hostDao; + private AdminManager adminManager; + private CommentManager commentManager; + private RedirectManager redirectManager; + private JobManager jobManager; + private Whiteboard whiteboard; + private HostSearchFactory hostSearchFactory; + + @Override + public void getHosts(HostGetHostsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(HostGetHostsResponse.newBuilder() + 
.setHosts(whiteboard.getHosts(hostSearchFactory.create(request.getR()))).build()); + responseObserver.onCompleted(); + } + + @Override + public void getHostWhiteboard(HostGetHostWhiteboardRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(HostGetHostWhiteboardResponse.newBuilder() + .setNestedHosts(whiteboard.getHostWhiteboard()).build()); + responseObserver.onCompleted(); + } + + @Override + public void findHost(HostFindHostRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(HostFindHostResponse.newBuilder() + .setHost(whiteboard.findHost(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void getHost(HostGetHostRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext( + HostGetHostResponse.newBuilder().setHost(whiteboard.getHost(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void lock(HostLockRequest request, StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.setHostLock(host, LockState.LOCKED, new Source("HostApi")); + responseObserver.onNext(HostLockResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void unlock(HostUnlockRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.setHostLock(host, LockState.OPEN, new Source("HostApi")); + responseObserver.onNext(HostUnlockResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void rebootWhenIdle(HostRebootWhenIdleRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.rebootWhenIdle(host); + responseObserver.onNext(HostRebootWhenIdleResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void delete(HostDeleteRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.deleteHost(host); + responseObserver.onNext(HostDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void reboot(HostRebootRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.rebootNow(host); + responseObserver.onNext(HostRebootResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setAllocation(HostSetAllocationRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.setAllocation(host, adminManager.getAllocationDetail(request.getAllocationId())); + responseObserver.onNext(HostSetAllocationResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addTags(HostAddTagsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.addTags(host, request.getTagsList().toArray(new String[0])); + responseObserver.onNext(HostAddTagsResponse.newBuilder().build()); + 
responseObserver.onCompleted(); + } + + @Override + public void removeTags(HostRemoveTagsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.removeTags(host, request.getTagsList().toArray(new String[0])); + responseObserver.onNext(HostRemoveTagsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void renameTag(HostRenameTagRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.renameTag(host, request.getOldTag(), request.getNewTag()); + responseObserver.onNext(HostRenameTagResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addComment(HostAddCommentRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + CommentDetail c = new CommentDetail(); + Comment newComment = request.getNewComment(); + c.message = newComment.getMessage(); + c.subject = newComment.getSubject(); + c.user = newComment.getUser(); + c.timestamp = null; + commentManager.addComment(host, c); + responseObserver.onNext(HostAddCommentResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getComments(HostGetCommentsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + CommentSeq commentSeq = whiteboard.getComments(host); + responseObserver.onNext(HostGetCommentsResponse.newBuilder().setComments(commentSeq).build()); + responseObserver.onCompleted(); + } + + @Override + public void getProcs(HostGetProcsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + ProcSeq procs = whiteboard.getProcs(host); + responseObserver.onNext(HostGetProcsResponse.newBuilder().setProcs(procs).build()); + responseObserver.onCompleted(); + } + + @Override + public void setThreadMode(HostSetThreadModeRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostDao.updateThreadMode(host, request.getMode()); + responseObserver.onNext(HostSetThreadModeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setHardwareState(HostSetHardwareStateRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostDao.updateHostState(host, request.getState()); + responseObserver.onNext(HostSetHardwareStateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getOwner(HostGetOwnerRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + responseObserver + .onNext(HostGetOwnerResponse.newBuilder().setOwner(whiteboard.getOwner(host)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getDeed(HostGetDeedRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + responseObserver + .onNext(HostGetDeedResponse.newBuilder().setDeed(whiteboard.getDeed(host)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getRenderPartitions(HostGetRenderPartitionsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + responseObserver.onNext(HostGetRenderPartitionsResponse.newBuilder() + 
.setRenderPartitions(whiteboard.getRenderPartitions(host)).build()); + responseObserver.onCompleted(); + } + + @Override + public void redirectToJob(HostRedirectToJobRequest request, + StreamObserver responseObserver) { + + List virtualProcs = new ArrayList<>(); + for (String procName : request.getProcNamesList()) { + virtualProcs.add(hostManager.getVirtualProc(procName)); + } + boolean value = redirectManager.addRedirect(virtualProcs, jobManager.getJob(request.getJobId()), + new Source(request.toString())); + responseObserver.onNext(HostRedirectToJobResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + @Override + public void setOs(HostSetOsRequest request, StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostDao.updateHostOs(host, request.getOs()); + responseObserver.onNext(HostSetOsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public CommentManager getCommentManager() { + return commentManager; + } + + public void setCommentManager(CommentManager commentManager) { + this.commentManager = commentManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public HostSearchFactory getHostSearchFactory() { + return hostSearchFactory; + } + + public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { + this.hostSearchFactory = hostSearchFactory; + } + + private HostInterface getHostInterface(Host host) { + return hostManager.getHost(host.getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java index a08ffa830..98432bdd4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import java.util.ArrayList; @@ -166,893 +162,809 @@ import static com.imageworks.spcue.servant.ServantUtil.attemptChange; public class ManageJob extends JobInterfaceGrpc.JobInterfaceImplBase { - private static final Logger logger = LogManager.getLogger(ManageJob.class); - private Whiteboard whiteboard; - private JobManager jobManager; - private GroupManager groupManager; - private JobManagerSupport jobManagerSupport; - private JobDao jobDao; - private JobLauncher jobLauncher; - private DependManager dependManager; - private CommentManager commentManager; - private DispatchQueue manageQueue; - private Dispatcher localDispatcher; - private LocalBookingSupport localBookingSupport; - private FilterManager filterManager; - private JobInterface job; - private FrameSearchFactory frameSearchFactory; - private JobSearchFactory jobSearchFactory; - private final String property = "frame.finished_jobs_readonly"; - @Autowired - private Environment env; - - @Override - public void findJob(JobFindJobRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(JobFindJobResponse.newBuilder() - .setJob(whiteboard.findJob(request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getJob(JobGetJobRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(JobGetJobResponse.newBuilder() - .setJob(whiteboard.getJob(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getJobs(JobGetJobsRequest request, StreamObserver responseObserver) { - responseObserver.onNext(JobGetJobsResponse.newBuilder() - .setJobs(whiteboard.getJobs(jobSearchFactory.create(request.getR()))) - .build()); + private static final Logger logger = LogManager.getLogger(ManageJob.class); + private Whiteboard whiteboard; + private JobManager jobManager; + private GroupManager groupManager; + private JobManagerSupport jobManagerSupport; + private JobDao jobDao; + private JobLauncher jobLauncher; + private DependManager dependManager; + private CommentManager commentManager; + private DispatchQueue manageQueue; + private Dispatcher localDispatcher; + private LocalBookingSupport localBookingSupport; + private FilterManager filterManager; + private JobInterface job; + private FrameSearchFactory frameSearchFactory; + private JobSearchFactory jobSearchFactory; + private final String property = "frame.finished_jobs_readonly"; + @Autowired + private Environment env; + + @Override + public void findJob(JobFindJobRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext( + JobFindJobResponse.newBuilder().setJob(whiteboard.findJob(request.getName())).build()); + responseObserver.onCompleted(); 
+ } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void getJob(JobGetJobRequest request, StreamObserver responseObserver) { + try { + responseObserver.onNext( + JobGetJobResponse.newBuilder().setJob(whiteboard.getJob(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void getJobs(JobGetJobsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(JobGetJobsResponse.newBuilder() + .setJobs(whiteboard.getJobs(jobSearchFactory.create(request.getR()))).build()); + responseObserver.onCompleted(); + } + + @Override + public void getJobNames(JobGetJobNamesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(JobGetJobNamesResponse.newBuilder() + .addAllNames(whiteboard.getJobNames(jobSearchFactory.create(request.getR()))).build()); + responseObserver.onCompleted(); + } + + @Override + public void isJobPending(JobIsJobPendingRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(JobIsJobPendingResponse.newBuilder() + .setValue(whiteboard.isJobPending(request.getName())).build()); + responseObserver.onCompleted(); + } + + @Override + public void getFrames(JobGetFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + FrameSeq frameSeq = whiteboard.getFrames(frameSearchFactory.create(job, request.getReq())); + responseObserver.onNext(JobGetFramesResponse.newBuilder().setFrames(frameSeq).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getLayers(JobGetLayersRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + LayerSeq layerSeq = whiteboard.getLayers(job); + responseObserver.onNext(JobGetLayersResponse.newBuilder().setLayers(layerSeq).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void kill(JobKillRequest request, StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + manageQueue.execute(new DispatchJobComplete(job, new Source(request.toString(), + request.getUsername(), request.getPid(), request.getHostKill(), request.getReason()), + true, jobManagerSupport)); + responseObserver.onNext(JobKillResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void launchSpecAndWait(JobLaunchSpecAndWaitRequest request, + StreamObserver responseObserver) { + try { + JobSpec spec = jobLauncher.parse(request.getSpec()); + jobLauncher.launch(spec); + JobSeq.Builder jobSeqBuilder = JobSeq.newBuilder(); + for (BuildableJob j : spec.getJobs()) { + jobSeqBuilder.addJobs(whiteboard.findJob(j.detail.name)); + } + responseObserver + .onNext(JobLaunchSpecAndWaitResponse.newBuilder().setJobs(jobSeqBuilder.build()).build()); + 
responseObserver.onCompleted(); + } catch (Exception e) { + logger.error("Failed to launch and add job.", e); + responseObserver.onError( + Status.INTERNAL.withDescription("Failed to launch and add job: " + e.getMessage()) + .withCause(e).asRuntimeException()); + } + } + + @Override + public void launchSpec(JobLaunchSpecRequest request, + StreamObserver responseObserver) { + try { + JobSpec spec = jobLauncher.parse(request.getSpec()); + List result = new ArrayList(8); + for (BuildableJob j : spec.getJobs()) { + result.add(j.detail.name); + } + jobLauncher.queueAndLaunch(spec); + responseObserver.onNext(JobLaunchSpecResponse.newBuilder().addAllNames(result).build()); + responseObserver.onCompleted(); + } catch (Exception e) { + logger.error("Failed to add job to launch queue.", e); + responseObserver.onError( + Status.INTERNAL.withDescription("Failed to add job to launch queue: " + e.getMessage()) + .withCause(e).asRuntimeException()); + } + } + + @Override + public void pause(JobPauseRequest request, StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobManager.setJobPaused(job, true); + responseObserver.onNext(JobPauseResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void resume(JobResumeRequest request, StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobManager.setJobPaused(job, false); + responseObserver.onNext(JobResumeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setMaxCores(JobSetMaxCoresRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateMaxCores(job, Convert.coresToWholeCoreUnits(request.getVal())); + responseObserver.onNext(JobSetMaxCoresResponse.newBuilder().build()); responseObserver.onCompleted(); - } - - @Override - public void getJobNames(JobGetJobNamesRequest request, StreamObserver responseObserver) { - responseObserver.onNext(JobGetJobNamesResponse.newBuilder() - .addAllNames(whiteboard.getJobNames(jobSearchFactory.create(request.getR()))) - .build()); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setMinCores(JobSetMinCoresRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobDao.updateMinCores(job, Convert.coresToWholeCoreUnits(request.getVal())); + responseObserver.onNext(JobSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setMaxGpus(JobSetMaxGpusRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobDao.updateMaxGpus(job, request.getVal()); + responseObserver.onNext(JobSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + 
.onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setMinGpus(JobSetMinGpusRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobDao.updateMinGpus(job, request.getVal()); + responseObserver.onNext(JobSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setPriority(JobSetPriorityRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updatePriority(job, request.getVal()); + responseObserver.onNext(JobSetPriorityResponse.newBuilder().build()); responseObserver.onCompleted(); - } - - @Override - public void isJobPending(JobIsJobPendingRequest request, StreamObserver responseObserver) { - responseObserver.onNext(JobIsJobPendingResponse.newBuilder() - .setValue(whiteboard.isJobPending(request.getName())) - .build()); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getCurrent(JobGetCurrentRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + Job currentJob = whiteboard.getJob(job.getId()); + responseObserver.onNext(JobGetCurrentResponse.newBuilder().setJob(currentJob).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void eatFrames(JobEatFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchEatFrames(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(JobEatFramesResponse.newBuilder().build()); responseObserver.onCompleted(); - } - - @Override - public void getFrames(JobGetFramesRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - FrameSeq frameSeq = whiteboard.getFrames(frameSearchFactory.create(job, request.getReq())); - responseObserver.onNext(JobGetFramesResponse.newBuilder() - .setFrames(frameSeq) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getLayers(JobGetLayersRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - LayerSeq layerSeq = whiteboard.getLayers(job); - responseObserver.onNext(JobGetLayersResponse.newBuilder() - .setLayers(layerSeq) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void kill(JobKillRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - manageQueue.execute(new DispatchJobComplete(job, - new 
Source(request.toString(), request.getUsername(), request.getPid(), - request.getHostKill(), request.getReason()), - true, jobManagerSupport)); - responseObserver.onNext(JobKillResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void launchSpecAndWait(JobLaunchSpecAndWaitRequest request, - StreamObserver responseObserver) { - try { - JobSpec spec = jobLauncher.parse(request.getSpec()); - jobLauncher.launch(spec); - JobSeq.Builder jobSeqBuilder = JobSeq.newBuilder(); - for (BuildableJob j : spec.getJobs()) { - jobSeqBuilder.addJobs(whiteboard.findJob(j.detail.name)); - } - responseObserver.onNext(JobLaunchSpecAndWaitResponse.newBuilder() - .setJobs(jobSeqBuilder.build()) - .build()); - responseObserver.onCompleted(); - } catch (Exception e) { - logger.error("Failed to launch and add job.", e); - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to launch and add job: " + e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void launchSpec(JobLaunchSpecRequest request, StreamObserver responseObserver) { - try { - JobSpec spec = jobLauncher.parse(request.getSpec()); - List result = new ArrayList(8); - for (BuildableJob j : spec.getJobs()) { - result.add(j.detail.name); - } - jobLauncher.queueAndLaunch(spec); - responseObserver.onNext(JobLaunchSpecResponse.newBuilder() - .addAllNames(result) - .build()); - responseObserver.onCompleted(); - } catch (Exception e) { - logger.error("Failed to add job to launch queue.", e); - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to add job to launch queue: " + e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void pause(JobPauseRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - jobManager.setJobPaused(job, true); - responseObserver.onNext(JobPauseResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void resume(JobResumeRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - jobManager.setJobPaused(job, false); - responseObserver.onNext(JobResumeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setMaxCores(JobSetMaxCoresRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateMaxCores(job, Convert.coresToWholeCoreUnits(request.getVal())); - responseObserver.onNext(JobSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setMinCores(JobSetMinCoresRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - jobDao.updateMinCores(job, 
Convert.coresToWholeCoreUnits(request.getVal())); - responseObserver.onNext(JobSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setMaxGpus(JobSetMaxGpusRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - jobDao.updateMaxGpus(job, request.getVal()); - responseObserver.onNext(JobSetMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setMinGpus(JobSetMinGpusRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - jobDao.updateMinGpus(job, request.getVal()); - responseObserver.onNext(JobSetMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setPriority(JobSetPriorityRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updatePriority(job, request.getVal()); - responseObserver.onNext(JobSetPriorityResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getCurrent(JobGetCurrentRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - Job currentJob = whiteboard.getJob(job.getId()); - responseObserver.onNext(JobGetCurrentResponse.newBuilder() - .setJob(currentJob) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void eatFrames(JobEatFramesRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute( - new DispatchEatFrames( - frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(JobEatFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void killFrames(JobKillFramesRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute( - new DispatchKillFrames( - frameSearchFactory.create(job, request.getReq()), - new Source(request.toString(), request.getUsername(), request.getPid(), - request.getHostKill(), request.getReason()), - jobManagerSupport)); - responseObserver.onNext(JobKillFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch 
(EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void markDoneFrames(JobMarkDoneFramesRequest request, - StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute( - new DispatchSatisfyDepends( - frameSearchFactory.create(job, request.getReq()), jobManagerSupport)); - responseObserver.onNext(JobMarkDoneFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void retryFrames(JobRetryFramesRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute( - new DispatchRetryFrames( - frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(JobRetryFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setAutoEat(JobSetAutoEatRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateAutoEat(job, request.getValue()); - responseObserver.onNext(JobSetAutoEatResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void createDependencyOnFrame(JobCreateDependencyOnFrameRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobOnFrame depend = new JobOnFrame(job, - jobManager.getFrameDetail(request.getFrame().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnFrameResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void createDependencyOnJob(JobCreateDependencyOnJobRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobOnJob depend = new JobOnJob(job, - jobManager.getJobDetail(request.getOnJob().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnJobResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void createDependencyOnLayer(JobCreateDependencyOnLayerRequest request, - 
StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobOnLayer depend = new JobOnLayer(job, - jobManager.getLayerDetail(request.getLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnLayerResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getWhatDependsOnThis(JobGetWhatDependsOnThisRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext(JobGetWhatDependsOnThisResponse.newBuilder() - .setDepends(whiteboard.getWhatDependsOnThis(job)) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getWhatThisDependsOn(JobGetWhatThisDependsOnRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext(JobGetWhatThisDependsOnResponse.newBuilder() - .setDepends(whiteboard.getWhatThisDependsOn(job)) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getDepends(JobGetDependsRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext(JobGetDependsResponse.newBuilder() - .setDepends(whiteboard.getDepends(job)) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getUpdatedFrames(JobGetUpdatedFramesRequest request, StreamObserver responseObserver) { - try{ - setupJobData(request.getJob()); - UpdatedFrameCheckResult result = whiteboard.getUpdatedFrames(job, - ServantUtil.convertLayerFilterList(request.getLayerFilter()), request.getLastCheck()); - responseObserver.onNext(JobGetUpdatedFramesResponse.newBuilder() - .setUpdatedFrames(result.getUpdatedFrames()) - .setServerTime(result.getServerTime()) - .setState(result.getState()) - .build()); - responseObserver.onCompleted(); - - } catch (java.lang.IllegalArgumentException e) { - System.out.println(e); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setMaxRetries(JobSetMaxRetriesRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateMaxFrameRetries(job, request.getMaxRetries()); - responseObserver.onNext(JobSetMaxRetriesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void addComment(JobAddCommentRequest request, 
StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - Comment newComment = request.getNewComment(); - CommentDetail c = new CommentDetail(); - c.message = newComment.getMessage(); - c.subject = newComment.getSubject(); - c.user = newComment.getUser(); - c.timestamp = null; - commentManager.addComment(job, c); - responseObserver.onNext(JobAddCommentResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void getComments(JobGetCommentsRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext(JobGetCommentsResponse.newBuilder() - .setComments(whiteboard.getComments(job)) - .build()); - responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void dropDepends(JobDropDependsRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchDropDepends(job, request.getTarget(), dependManager)); - responseObserver.onNext(JobDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void setGroup(JobSetGroupRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateParent(job, groupManager.getGroupDetail(request.getGroupId())); - responseObserver.onNext(JobSetGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void markAsWaiting(JobMarkAsWaitingRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobManagerSupport.markFramesAsWaiting( - frameSearchFactory.create(job, request.getReq()), new Source(request.toString())); - responseObserver.onNext(JobMarkAsWaitingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void reorderFrames(JobReorderFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchReorderFrames(job, - new FrameSet(request.getRange()), request.getOrder(), jobManagerSupport)); - responseObserver.onNext(JobReorderFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public 
void shutdownIfCompleted(JobShutdownIfCompletedRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - manageQueue.execute(new DispatchShutdownJobIfCompleted(job, jobManagerSupport)); - responseObserver.onNext(JobShutdownIfCompletedResponse.newBuilder().build()); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void killFrames(JobKillFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchKillFrames(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString(), request.getUsername(), request.getPid(), + request.getHostKill(), request.getReason()), + jobManagerSupport)); + responseObserver.onNext(JobKillFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void markDoneFrames(JobMarkDoneFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchSatisfyDepends( + frameSearchFactory.create(job, request.getReq()), jobManagerSupport)); + responseObserver.onNext(JobMarkDoneFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void retryFrames(JobRetryFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue + .execute(new DispatchRetryFrames(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(JobRetryFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setAutoEat(JobSetAutoEatRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateAutoEat(job, request.getValue()); + responseObserver.onNext(JobSetAutoEatResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void createDependencyOnFrame(JobCreateDependencyOnFrameRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnFrame depend = + new JobOnFrame(job, jobManager.getFrameDetail(request.getFrame().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnFrameResponse.newBuilder() + 
.setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void createDependencyOnJob(JobCreateDependencyOnJobRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnJob depend = new JobOnJob(job, jobManager.getJobDetail(request.getOnJob().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnJobResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void createDependencyOnLayer(JobCreateDependencyOnLayerRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnLayer depend = + new JobOnLayer(job, jobManager.getLayerDetail(request.getLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnLayerResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getWhatDependsOnThis(JobGetWhatDependsOnThisRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext(JobGetWhatDependsOnThisResponse.newBuilder() + .setDepends(whiteboard.getWhatDependsOnThis(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getWhatThisDependsOn(JobGetWhatThisDependsOnRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext(JobGetWhatThisDependsOnResponse.newBuilder() + .setDepends(whiteboard.getWhatThisDependsOn(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getDepends(JobGetDependsRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext( + JobGetDependsResponse.newBuilder().setDepends(whiteboard.getDepends(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getUpdatedFrames(JobGetUpdatedFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + UpdatedFrameCheckResult result = whiteboard.getUpdatedFrames(job, + ServantUtil.convertLayerFilterList(request.getLayerFilter()), request.getLastCheck()); + responseObserver.onNext( + 
JobGetUpdatedFramesResponse.newBuilder().setUpdatedFrames(result.getUpdatedFrames()) + .setServerTime(result.getServerTime()).setState(result.getState()).build()); + responseObserver.onCompleted(); + + } catch (java.lang.IllegalArgumentException e) { + System.out.println(e); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setMaxRetries(JobSetMaxRetriesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateMaxFrameRetries(job, request.getMaxRetries()); + responseObserver.onNext(JobSetMaxRetriesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void addComment(JobAddCommentRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + Comment newComment = request.getNewComment(); + CommentDetail c = new CommentDetail(); + c.message = newComment.getMessage(); + c.subject = newComment.getSubject(); + c.user = newComment.getUser(); + c.timestamp = null; + commentManager.addComment(job, c); + responseObserver.onNext(JobAddCommentResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void getComments(JobGetCommentsRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext( + JobGetCommentsResponse.newBuilder().setComments(whiteboard.getComments(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void dropDepends(JobDropDependsRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchDropDepends(job, request.getTarget(), dependManager)); + responseObserver.onNext(JobDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void setGroup(JobSetGroupRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateParent(job, groupManager.getGroupDetail(request.getGroupId())); + responseObserver.onNext(JobSetGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void markAsWaiting(JobMarkAsWaitingRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + 
jobManagerSupport.markFramesAsWaiting(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString())); + responseObserver.onNext(JobMarkAsWaitingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void reorderFrames(JobReorderFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchReorderFrames(job, new FrameSet(request.getRange()), + request.getOrder(), jobManagerSupport)); + responseObserver.onNext(JobReorderFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void shutdownIfCompleted(JobShutdownIfCompletedRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + manageQueue.execute(new DispatchShutdownJobIfCompleted(job, jobManagerSupport)); + responseObserver.onNext(JobShutdownIfCompletedResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void staggerFrames(JobStaggerFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchStaggerFrames(job, request.getRange(), request.getStagger(), + jobManagerSupport)); + responseObserver.onNext(JobStaggerFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void addRenderPartition(JobAddRenderPartRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setJobId(job.getId()); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuUnits(request.getMaxGpus()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.JOB_PARTITION); + + if (localBookingSupport.bookLocal(job, request.getHost(), request.getUsername(), lha)) { + try { + RenderPartition renderPart = whiteboard.getRenderPartition(lha); + responseObserver.onNext( + JobAddRenderPartResponse.newBuilder().setRenderPartition(renderPart).build()); responseObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void staggerFrames(JobStaggerFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute( - new DispatchStaggerFrames(job, 
request.getRange(), request.getStagger(), jobManagerSupport)); - responseObserver.onNext(JobStaggerFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void addRenderPartition(JobAddRenderPartRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setJobId(job.getId()); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpuUnits(request.getMaxGpus()); - lha.setMaxGpuMemory(request.getMaxGpuMemory()); - lha.setType(RenderPartitionType.JOB_PARTITION); - - if (localBookingSupport.bookLocal(job, request.getHost(), request.getUsername(), lha)) { - try { - RenderPartition renderPart = whiteboard.getRenderPartition(lha); - responseObserver.onNext(JobAddRenderPartResponse.newBuilder() - .setRenderPartition(renderPart) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to allocate render partition to host.") - .asRuntimeException()); - } - } else { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find suitable frames.") - .asRuntimeException()); - } - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - @Override - public void runFilters(JobRunFiltersRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); - filterManager.runFiltersOnJob(jobDetail); - responseObserver.onNext(JobRunFiltersResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - public void addSubscriber(JobAddSubscriberRequest request, StreamObserver responseStreamObserver) { - try { - setupJobData(request.getJob()); - Set subscribers = Sets.newHashSet(jobManager.getEmail(job).split(",")); - subscribers.add(request.getSubscriber()); - jobManager.updateEmail(job, String.join(",", subscribers)); - responseStreamObserver.onNext(JobAddSubscriberResponse.newBuilder().build()); - responseStreamObserver.onCompleted(); - } - catch (EmptyResultDataAccessException e) { - responseStreamObserver.onError(Status.INTERNAL - .withDescription("Failed to find job data") - .asRuntimeException()); - } - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue dispatchQueue) { - this.manageQueue = dispatchQueue; - } - - public DependManager getDependManager() { - return dependManager; - } - - 
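Side note on the guard used throughout these RPC methods: the reformatted code wraps every mutation in attemptChange(env, property, jobManager, job, responseObserver), keyed on the frame.finished_jobs_readonly (and, in ManageLayer, layer.finished_jobs_readonly) property. The following is a minimal illustrative sketch of what such a guard can look like; it is not the actual ServantUtil.attemptChange touched by this patch, and the isJobComplete() check, the property semantics, and the package paths are assumptions made only for the example.

    // Illustrative sketch only, not part of this patch.
    import io.grpc.Status;
    import io.grpc.stub.StreamObserver;
    import org.springframework.core.env.Environment;

    import com.imageworks.spcue.JobInterface;          // package path as in this repo
    import com.imageworks.spcue.service.JobManager;    // package path assumed

    public final class ReadOnlyGuardSketch {
        /**
         * Returns true when the change may proceed. When the configured property marks
         * finished jobs read-only and the job is already complete, an error is sent on
         * the observer instead and false is returned.
         */
        public static boolean attemptChange(Environment env, String property, JobManager jobManager,
                JobInterface job, StreamObserver<?> responseObserver) {
            boolean readOnly = env.getProperty(property, Boolean.class, false);
            // isJobComplete() is an assumed check; the real guard may decide differently.
            if (readOnly && jobManager.isJobComplete(job)) {
                responseObserver.onError(Status.FAILED_PRECONDITION
                        .withDescription("Finished jobs are read-only").asRuntimeException());
                return false;
            }
            return true;
        }
    }

Because the guard itself calls onError when it refuses the change, the callers above only need the positive branch, which is why every onNext/onCompleted pair sits inside the if block.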
public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public JobLauncher getJobLauncher() { - return jobLauncher; - } - - public void setJobLauncher(JobLauncher jobLauncher) { - this.jobLauncher = jobLauncher; - } - - public CommentManager getCommentManager() { - return commentManager; - } - - public void setCommentManager(CommentManager commentManager) { - this.commentManager = commentManager; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } - - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - private void setupJobData(Job jobData) { - setJobManager(jobManagerSupport.getJobManager()); - setDependManager(jobManagerSupport.getDependManager()); - job = jobManager.getJob(jobData.getId()); - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - public JobSearchFactory getJobSearchFactory() { - return jobSearchFactory; - } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.INTERNAL.withDescription("Failed to allocate render partition to host.") + .asRuntimeException()); + } + } else { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to find suitable frames.").asRuntimeException()); + } + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + @Override + public void runFilters(JobRunFiltersRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); + filterManager.runFiltersOnJob(jobDetail); + responseObserver.onNext(JobRunFiltersResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + public void addSubscriber(JobAddSubscriberRequest request, + StreamObserver responseStreamObserver) { + try { + setupJobData(request.getJob()); + Set subscribers = Sets.newHashSet(jobManager.getEmail(job).split(",")); + subscribers.add(request.getSubscriber()); + jobManager.updateEmail(job, String.join(",", subscribers)); + 
responseStreamObserver.onNext(JobAddSubscriberResponse.newBuilder().build()); + responseStreamObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseStreamObserver + .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); + } + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue dispatchQueue) { + this.manageQueue = dispatchQueue; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public JobLauncher getJobLauncher() { + return jobLauncher; + } + + public void setJobLauncher(JobLauncher jobLauncher) { + this.jobLauncher = jobLauncher; + } + + public CommentManager getCommentManager() { + return commentManager; + } + + public void setCommentManager(CommentManager commentManager) { + this.commentManager = commentManager; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } + + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } - public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { - this.jobSearchFactory = jobSearchFactory; - } + private void setupJobData(Job jobData) { + setJobManager(jobManagerSupport.getJobManager()); + setDependManager(jobManagerSupport.getDependManager()); + job = jobManager.getJob(jobData.getId()); + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } + + public JobSearchFactory getJobSearchFactory() { + return jobSearchFactory; + } + + public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { + this.jobSearchFactory = jobSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java index f719f6da7..e125ca8c2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project 
* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.servant; @@ -129,481 +127,480 @@ public class ManageLayer extends LayerInterfaceGrpc.LayerInterfaceImplBase { - private LayerDetail layer; - private FrameSearchInterface frameSearch; - private JobManager jobManager; - private DependManager dependManager; - private JobManagerSupport jobManagerSupport; - private LayerDao layerDao; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - private LocalBookingSupport localBookingSupport; - private FrameSearchFactory frameSearchFactory; - private final String property = "layer.finished_jobs_readonly"; - @Autowired - private Environment env; - - @Override - public void findLayer(LayerFindLayerRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(LayerFindLayerResponse.newBuilder() - .setLayer(whiteboard.findLayer(request.getJob(), request.getLayer())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getLayer(LayerGetLayerRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(LayerGetLayerResponse.newBuilder() - .setLayer(whiteboard.getLayer(request.getId())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void eatFrames(LayerEatFramesRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchEatFrames(frameSearch, - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerEatFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void getFrames(LayerGetFramesRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - FrameSeq frames = whiteboard.getFrames(frameSearchFactory.create(layer, request.getS())); - responseObserver.onNext(LayerGetFramesResponse.newBuilder() - .setFrames(frames) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void killFrames(LayerKillFramesRequest request, StreamObserver 
responseObserver) { - updateLayer(request.getLayer()); - manageQueue.execute(new DispatchKillFrames(frameSearch, - new Source(request.toString(), request.getUsername(), request.getPid(), - request.getHostKill(), request.getReason()), jobManagerSupport)); - responseObserver.onNext(LayerKillFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void markdoneFrames(LayerMarkdoneFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchSatisfyDepends(layer, jobManagerSupport)); - responseObserver.onNext(LayerMarkdoneFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void retryFrames(LayerRetryFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchRetryFrames(frameSearch, - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerRetryFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setTags(LayerSetTagsRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateLayerTags(layer, new HashSet<>(request.getTagsList())); - responseObserver.onNext(LayerSetTagsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinCores(LayerSetMinCoresRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.setLayerMinCores(layer, Convert.coresToCoreUnits(request.getCores())); - responseObserver.onNext(LayerSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinGpus(LayerSetMinGpusRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.setLayerMinGpus(layer, request.getMinGpus()); - responseObserver.onNext(LayerSetMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinMemory(LayerSetMinMemoryRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateLayerMinMemory(layer, request.getMemory()); - responseObserver.onNext(LayerSetMinMemoryResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinGpuMemory(LayerSetMinGpuMemoryRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateLayerMinGpuMemory(layer, request.getGpuMemory()); - responseObserver.onNext(LayerSetMinGpuMemoryResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createDependencyOnFrame(LayerCreateDependOnFrameRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LayerOnFrame depend = new LayerOnFrame(layer, 
jobManager.getFrameDetail(request.getFrame().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnFrameResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createDependencyOnJob(LayerCreateDependOnJobRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LayerOnJob depend = new LayerOnJob(layer, jobManager.getJobDetail(request.getJob().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnJobResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createDependencyOnLayer(LayerCreateDependOnLayerRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LayerOnLayer depend = new LayerOnLayer(layer, jobManager.getLayerDetail(request.getDependOnLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnLayerResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createFrameByFrameDependency(LayerCreateFrameByFrameDependRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - FrameByFrame depend = new FrameByFrame(layer, jobManager.getLayerDetail(request.getDependLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateFrameByFrameDependResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); - } - } - - @Override - public void getWhatDependsOnThis(LayerGetWhatDependsOnThisRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetWhatDependsOnThisResponse.newBuilder() - .setDepends(whiteboard.getWhatDependsOnThis(layer)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getWhatThisDependsOn(LayerGetWhatThisDependsOnRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetWhatThisDependsOnResponse.newBuilder() - .setDepends(whiteboard.getWhatThisDependsOn(layer)) - .build()); + private LayerDetail layer; + private FrameSearchInterface frameSearch; + private JobManager jobManager; + private DependManager dependManager; + private JobManagerSupport jobManagerSupport; + private LayerDao layerDao; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + private LocalBookingSupport localBookingSupport; + private FrameSearchFactory frameSearchFactory; + private final String property = "layer.finished_jobs_readonly"; + @Autowired + private Environment env; + + @Override + public void findLayer(LayerFindLayerRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(LayerFindLayerResponse.newBuilder() + .setLayer(whiteboard.findLayer(request.getJob(), request.getLayer())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + 
public void getLayer(LayerGetLayerRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(LayerGetLayerResponse.newBuilder() + .setLayer(whiteboard.getLayer(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void eatFrames(LayerEatFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute( + new DispatchEatFrames(frameSearch, new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(LayerEatFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void getFrames(LayerGetFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + FrameSeq frames = whiteboard.getFrames(frameSearchFactory.create(layer, request.getS())); + responseObserver.onNext(LayerGetFramesResponse.newBuilder().setFrames(frames).build()); + responseObserver.onCompleted(); + } + + @Override + public void killFrames(LayerKillFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + manageQueue.execute( + new DispatchKillFrames(frameSearch, new Source(request.toString(), request.getUsername(), + request.getPid(), request.getHostKill(), request.getReason()), jobManagerSupport)); + responseObserver.onNext(LayerKillFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void markdoneFrames(LayerMarkdoneFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchSatisfyDepends(layer, jobManagerSupport)); + responseObserver.onNext(LayerMarkdoneFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void retryFrames(LayerRetryFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute( + new DispatchRetryFrames(frameSearch, new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(LayerRetryFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setTags(LayerSetTagsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerTags(layer, new HashSet<>(request.getTagsList())); + responseObserver.onNext(LayerSetTagsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinCores(LayerSetMinCoresRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMinCores(layer, Convert.coresToCoreUnits(request.getCores())); + responseObserver.onNext(LayerSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinGpus(LayerSetMinGpusRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, 
jobManager, layer, responseObserver)) { + jobManager.setLayerMinGpus(layer, request.getMinGpus()); + responseObserver.onNext(LayerSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinMemory(LayerSetMinMemoryRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerMinMemory(layer, request.getMemory()); + responseObserver.onNext(LayerSetMinMemoryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinGpuMemory(LayerSetMinGpuMemoryRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerMinGpuMemory(layer, request.getGpuMemory()); + responseObserver.onNext(LayerSetMinGpuMemoryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createDependencyOnFrame(LayerCreateDependOnFrameRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnFrame depend = + new LayerOnFrame(layer, jobManager.getFrameDetail(request.getFrame().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnFrameResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createDependencyOnJob(LayerCreateDependOnJobRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnJob depend = new LayerOnJob(layer, jobManager.getJobDetail(request.getJob().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnJobResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createDependencyOnLayer(LayerCreateDependOnLayerRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnLayer depend = + new LayerOnLayer(layer, jobManager.getLayerDetail(request.getDependOnLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnLayerResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createFrameByFrameDependency(LayerCreateFrameByFrameDependRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + FrameByFrame depend = + new FrameByFrame(layer, jobManager.getLayerDetail(request.getDependLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateFrameByFrameDependResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void getWhatDependsOnThis(LayerGetWhatDependsOnThisRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetWhatDependsOnThisResponse.newBuilder() + .setDepends(whiteboard.getWhatDependsOnThis(layer)).build()); + 
responseObserver.onCompleted(); + } + + @Override + public void getWhatThisDependsOn(LayerGetWhatThisDependsOnRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetWhatThisDependsOnResponse.newBuilder() + .setDepends(whiteboard.getWhatThisDependsOn(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void dropDepends(LayerDropDependsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchDropDepends(layer, request.getTarget(), dependManager)); + responseObserver.onNext(LayerDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void dropLimit(LayerDropLimitRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.dropLimit(layer, request.getLimitId()); + responseObserver.onNext(LayerDropLimitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void reorderFrames(LayerReorderFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchReorderFrames(layer, new FrameSet(request.getRange()), + request.getOrder(), jobManagerSupport)); + responseObserver.onNext(LayerReorderFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void staggerFrames(LayerStaggerFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchStaggerFrames(layer, request.getRange(), request.getStagger(), + jobManagerSupport)); + responseObserver.onNext(LayerStaggerFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setThreadable(LayerSetThreadableRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateThreadable(layer, request.getThreadable()); + responseObserver.onNext(LayerSetThreadableResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setTimeout(LayerSetTimeoutRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateTimeout(layer, request.getTimeout()); + responseObserver.onNext(LayerSetTimeoutResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setTimeoutLLU(LayerSetTimeoutLLURequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateTimeoutLLU(layer, request.getTimeoutLlu()); + responseObserver.onNext(LayerSetTimeoutLLUResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void addLimit(LayerAddLimitRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.addLimit(layer, 
request.getLimitId()); + responseObserver.onNext(LayerAddLimitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void addRenderPartition(LayerAddRenderPartitionRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuUnits(request.getMaxGpus()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.LAYER_PARTITION); + if (localBookingSupport.bookLocal(layer, request.getHost(), request.getUsername(), lha)) { + RenderPartition partition = whiteboard.getRenderPartition(lha); + responseObserver.onNext( + LayerAddRenderPartitionResponse.newBuilder().setRenderPartition(partition).build()); responseObserver.onCompleted(); - } - - @Override - public void dropDepends(LayerDropDependsRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchDropDepends(layer, request.getTarget(), dependManager)); - responseObserver.onNext(LayerDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void dropLimit(LayerDropLimitRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.dropLimit(layer, request.getLimitId()); - responseObserver.onNext(LayerDropLimitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void reorderFrames(LayerReorderFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchReorderFrames(layer, new FrameSet(request.getRange()), request.getOrder(), - jobManagerSupport)); - responseObserver.onNext(LayerReorderFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void staggerFrames(LayerStaggerFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchStaggerFrames(layer, request.getRange(), request.getStagger(), - jobManagerSupport)); - responseObserver.onNext(LayerStaggerFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setThreadable(LayerSetThreadableRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateThreadable(layer, request.getThreadable()); - responseObserver.onNext(LayerSetThreadableResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setTimeout(LayerSetTimeoutRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateTimeout(layer, request.getTimeout()); - responseObserver.onNext(LayerSetTimeoutResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - 
@Override - public void setTimeoutLLU(LayerSetTimeoutLLURequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateTimeoutLLU(layer, request.getTimeoutLlu()); - responseObserver.onNext(LayerSetTimeoutLLUResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void addLimit(LayerAddLimitRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.addLimit(layer, request.getLimitId()); - responseObserver.onNext(LayerAddLimitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void addRenderPartition(LayerAddRenderPartitionRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpuUnits(request.getMaxGpus()); - lha.setMaxGpuMemory(request.getMaxGpuMemory()); - lha.setType(RenderPartitionType.LAYER_PARTITION); - if (localBookingSupport.bookLocal(layer, request.getHost(), request.getUsername(), lha)) { - RenderPartition partition = whiteboard.getRenderPartition(lha); - responseObserver.onNext(LayerAddRenderPartitionResponse.newBuilder() - .setRenderPartition(partition) - .build()); - responseObserver.onCompleted(); - } else { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find suitable frames.") - .asRuntimeException()); - } - } - - } - - @Override - public void registerOutputPath(LayerRegisterOutputPathRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.registerLayerOutput(layer, request.getSpec()); - responseObserver.onNext(LayerRegisterOutputPathResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void getLimits(LayerGetLimitsRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetLimitsResponse.newBuilder() - .addAllLimits(whiteboard.getLimits(layer)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getOutputPaths(LayerGetOutputPathsRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetOutputPathsResponse.newBuilder() - .addAllOutputPaths(jobManager.getLayerOutputs(layer)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void enableMemoryOptimizer(LayerEnableMemoryOptimizerRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.enableMemoryOptimizer(layer, request.getValue()); - responseObserver.onNext(LayerEnableMemoryOptimizerResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMaxCores(LayerSetMaxCoresRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.setLayerMaxCores(layer, 
Convert.coresToWholeCoreUnits(request.getCores())); - responseObserver.onNext(LayerSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMaxGpus(LayerSetMaxGpusRequest request, StreamObserver responseObserver) { - updateLayer(request.getLayer()); - jobManager.setLayerMaxGpus(layer, request.getMaxGpus()); - responseObserver.onNext(LayerSetMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue dispatchQueue) { - this.manageQueue = dispatchQueue; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public LayerDetail getLayer() { - return layer; - } - - public void setLayer(LayerDetail layer) { - this.layer = layer; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - private void updateLayer(Layer layerData) { - setJobManager(jobManagerSupport.getJobManager()); - setDependManager(jobManagerSupport.getDependManager()); - layer = layerDao.getLayerDetail(layerData.getId()); - frameSearch = frameSearchFactory.create(layer); - } + } else { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find suitable frames.") + .asRuntimeException()); + } + } + + } + + @Override + public void registerOutputPath(LayerRegisterOutputPathRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.registerLayerOutput(layer, request.getSpec()); + responseObserver.onNext(LayerRegisterOutputPathResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void getLimits(LayerGetLimitsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext( + LayerGetLimitsResponse.newBuilder().addAllLimits(whiteboard.getLimits(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getOutputPaths(LayerGetOutputPathsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetOutputPathsResponse.newBuilder() + .addAllOutputPaths(jobManager.getLayerOutputs(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void 
enableMemoryOptimizer(LayerEnableMemoryOptimizerRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.enableMemoryOptimizer(layer, request.getValue()); + responseObserver.onNext(LayerEnableMemoryOptimizerResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMaxCores(LayerSetMaxCoresRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMaxCores(layer, Convert.coresToWholeCoreUnits(request.getCores())); + responseObserver.onNext(LayerSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMaxGpus(LayerSetMaxGpusRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + jobManager.setLayerMaxGpus(layer, request.getMaxGpus()); + responseObserver.onNext(LayerSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue dispatchQueue) { + this.manageQueue = dispatchQueue; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public LayerDetail getLayer() { + return layer; + } + + public void setLayer(LayerDetail layer) { + this.layer = layer; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } + + private void updateLayer(Layer layerData) { + setJobManager(jobManagerSupport.getJobManager()); + setDependManager(jobManagerSupport.getDependManager()); + layer = layerDao.getLayerDetail(layerData.getId()); + frameSearch = frameSearchFactory.create(layer); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java index 42925b7ae..20013b510 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java @@ -20,87 +20,81 @@ import com.imageworks.spcue.service.AdminManager; import com.imageworks.spcue.service.Whiteboard; - public class ManageLimit extends LimitInterfaceGrpc.LimitInterfaceImplBase { - private AdminManager 
adminManager; - private Whiteboard whiteboard; - - @Override - public void create(LimitCreateRequest request, StreamObserver<LimitCreateResponse> responseObserver) { - String limitId = adminManager.createLimit(request.getName(), request.getMaxValue()); - LimitCreateResponse response = LimitCreateResponse.newBuilder() - .setLimit(whiteboard.getLimit(limitId)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void delete(LimitDeleteRequest request, StreamObserver<LimitDeleteResponse> responseObserver) { - adminManager.deleteLimit(adminManager.findLimit(request.getName())); - responseObserver.onNext(LimitDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void find(LimitFindRequest request, StreamObserver<LimitFindResponse> responseObserver) { - LimitFindResponse response = LimitFindResponse.newBuilder() - .setLimit(whiteboard.findLimit(request.getName())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void get(LimitGetRequest request, StreamObserver<LimitGetResponse> responseObserver) { - LimitGetResponse response = LimitGetResponse.newBuilder() - .setLimit(whiteboard.getLimit(request.getId())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getAll(LimitGetAllRequest request, - StreamObserver<LimitGetAllResponse> responseObserver) { - responseObserver.onNext(LimitGetAllResponse.newBuilder() - .addAllLimits(whiteboard.getLimits()) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void rename(LimitRenameRequest request, StreamObserver<LimitRenameResponse> responseObserver) { - adminManager.setLimitName( - adminManager.findLimit(request.getOldName()), - request.getNewName()); - responseObserver.onNext(LimitRenameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMaxValue(LimitSetMaxValueRequest request, StreamObserver<LimitSetMaxValueResponse> responseObserver) { - adminManager.setLimitMaxValue( - adminManager.findLimit(request.getName()), - request.getMaxValue()); - responseObserver.onNext(LimitSetMaxValueResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + private AdminManager adminManager; + private Whiteboard whiteboard; + + @Override + public void create(LimitCreateRequest request, + StreamObserver<LimitCreateResponse> responseObserver) { + String limitId = adminManager.createLimit(request.getName(), request.getMaxValue()); + LimitCreateResponse response = + LimitCreateResponse.newBuilder().setLimit(whiteboard.getLimit(limitId)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void delete(LimitDeleteRequest request, + StreamObserver<LimitDeleteResponse> responseObserver) { + adminManager.deleteLimit(adminManager.findLimit(request.getName())); + responseObserver.onNext(LimitDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void find(LimitFindRequest request, StreamObserver<LimitFindResponse> responseObserver) { + LimitFindResponse response = + LimitFindResponse.newBuilder().setLimit(whiteboard.findLimit(request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void 
get(LimitGetRequest request, StreamObserver<LimitGetResponse> responseObserver) { + LimitGetResponse response = + LimitGetResponse.newBuilder().setLimit(whiteboard.getLimit(request.getId())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getAll(LimitGetAllRequest request, + StreamObserver<LimitGetAllResponse> responseObserver) { + responseObserver + .onNext(LimitGetAllResponse.newBuilder().addAllLimits(whiteboard.getLimits()).build()); + responseObserver.onCompleted(); + } + + @Override + public void rename(LimitRenameRequest request, + StreamObserver<LimitRenameResponse> responseObserver) { + adminManager.setLimitName(adminManager.findLimit(request.getOldName()), request.getNewName()); + responseObserver.onNext(LimitRenameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMaxValue(LimitSetMaxValueRequest request, + StreamObserver<LimitSetMaxValueResponse> responseObserver) { + adminManager.setLimitMaxValue(adminManager.findLimit(request.getName()), request.getMaxValue()); + responseObserver.onNext(LimitSetMaxValueResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java index 75974067d..842f98f8c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.servant; import io.grpc.stub.StreamObserver; @@ -33,52 +29,52 @@ import com.imageworks.spcue.service.FilterManager; import com.imageworks.spcue.service.Whiteboard; - public class ManageMatcher extends MatcherInterfaceGrpc.MatcherInterfaceImplBase { - private FilterManager filterManager; - private Whiteboard whiteboard; - - public void delete(MatcherDeleteRequest request, StreamObserver<MatcherDeleteResponse> responseObserver) { - filterManager.deleteMatcher(filterManager.getMatcher(request.getMatcher().getId())); - responseObserver.onNext(MatcherDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public void getParentFilter(MatcherGetParentFilterRequest request, - StreamObserver<MatcherGetParentFilterResponse> responseObserver) { - MatcherEntity matcherEntity = filterManager.getMatcher(request.getMatcher().getId()); - MatcherGetParentFilterResponse response = MatcherGetParentFilterResponse.newBuilder() - .setFilter(whiteboard.getFilter(matcherEntity)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - public void commit(MatcherCommitRequest request, StreamObserver<MatcherCommitResponse> responseObserver) { - Matcher newMatcherData = request.getMatcher(); - String id = newMatcherData.getId(); - MatcherEntity oldMatcher = filterManager.getMatcher(id); - MatcherEntity newMatcher = MatcherEntity.build(filterManager.getFilter(oldMatcher), newMatcherData, id); - filterManager.updateMatcher(newMatcher); - responseObserver.onNext(MatcherCommitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + private FilterManager filterManager; + private Whiteboard whiteboard; + + public void delete(MatcherDeleteRequest request, + StreamObserver<MatcherDeleteResponse> responseObserver) { + filterManager.deleteMatcher(filterManager.getMatcher(request.getMatcher().getId())); + responseObserver.onNext(MatcherDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void getParentFilter(MatcherGetParentFilterRequest request, + StreamObserver<MatcherGetParentFilterResponse> responseObserver) { + MatcherEntity matcherEntity = filterManager.getMatcher(request.getMatcher().getId()); + MatcherGetParentFilterResponse response = MatcherGetParentFilterResponse.newBuilder() + .setFilter(whiteboard.getFilter(matcherEntity)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + public void commit(MatcherCommitRequest request, + StreamObserver<MatcherCommitResponse> responseObserver) { + Matcher newMatcherData = request.getMatcher(); + String id = newMatcherData.getId(); + MatcherEntity oldMatcher = filterManager.getMatcher(id); + MatcherEntity newMatcher = + MatcherEntity.build(filterManager.getFilter(oldMatcher), newMatcherData, id); + filterManager.updateMatcher(newMatcher); + responseObserver.onNext(MatcherCommitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } - diff --git 
a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java index 321306d1e..6b157351b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import io.grpc.Status; @@ -44,106 +40,105 @@ public class ManageOwner extends OwnerInterfaceGrpc.OwnerInterfaceImplBase { - private HostManager hostManager; - private OwnerManager ownerManager; - private Whiteboard whiteboard; - private AdminManager adminManager; - - @Override - public void getOwner(OwnerGetOwnerRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(OwnerGetOwnerResponse.newBuilder() - .setOwner(whiteboard.getOwner(request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void delete(OwnerDeleteRequest request, StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - ownerManager.deleteOwner((owner)); - OwnerDeleteResponse response = OwnerDeleteResponse.newBuilder().build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getDeeds(OwnerGetDeedsRequest request, StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - OwnerGetDeedsResponse response = OwnerGetDeedsResponse.newBuilder() - .setDeeds(whiteboard.getDeeds(owner)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getHosts(OwnerGetHostsRequest request, StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - OwnerGetHostsResponse response = OwnerGetHostsResponse.newBuilder() - .setHosts(whiteboard.getHosts(owner)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void takeOwnership(OwnerTakeOwnershipRequest request, - StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - ownerManager.takeOwnership(owner, 
hostManager.findHost(request.getHost())); - responseObserver.onNext(OwnerTakeOwnershipResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setShow(OwnerSetShowRequest request, StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - ownerManager.setShow(owner, adminManager.findShowEntity(request.getShow())); - responseObserver.onNext(OwnerSetShowResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public OwnerManager getOwnerManager() { - return ownerManager; - } - - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - private OwnerEntity getOwnerById(String id) { - return ownerManager.getOwner(id); + private HostManager hostManager; + private OwnerManager ownerManager; + private Whiteboard whiteboard; + private AdminManager adminManager; + + @Override + public void getOwner(OwnerGetOwnerRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(OwnerGetOwnerResponse.newBuilder() + .setOwner(whiteboard.getOwner(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } + } + + @Override + public void delete(OwnerDeleteRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + ownerManager.deleteOwner((owner)); + OwnerDeleteResponse response = OwnerDeleteResponse.newBuilder().build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getDeeds(OwnerGetDeedsRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + OwnerGetDeedsResponse response = + OwnerGetDeedsResponse.newBuilder().setDeeds(whiteboard.getDeeds(owner)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getHosts(OwnerGetHostsRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + OwnerGetHostsResponse response = + OwnerGetHostsResponse.newBuilder().setHosts(whiteboard.getHosts(owner)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void takeOwnership(OwnerTakeOwnershipRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + ownerManager.takeOwnership(owner, hostManager.findHost(request.getHost())); + responseObserver.onNext(OwnerTakeOwnershipResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setShow(OwnerSetShowRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + ownerManager.setShow(owner, adminManager.findShowEntity(request.getShow())); + 
responseObserver.onNext(OwnerSetShowResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public OwnerManager getOwnerManager() { + return ownerManager; + } + + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = ownerManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + private OwnerEntity getOwnerById(String id) { + return ownerManager.getOwner(id); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java index 7760799cb..3df42fcef 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.servant; @@ -69,254 +67,246 @@ public class ManageProc extends ProcInterfaceGrpc.ProcInterfaceImplBase { - private ProcDao procDao; - private Whiteboard whiteboard; - private JobManagerSupport jobManagerSupport; - private JobManager jobManager; - private GroupManager groupManager; - private RedirectManager redirectManager; - private ProcSearchFactory procSearchFactory; - - @Override - public void getProcs(ProcGetProcsRequest request, StreamObserver responseObserver) { - responseObserver.onNext(ProcGetProcsResponse.newBuilder() - .setProcs(whiteboard.getProcs(procSearchFactory.create(request.getR()))) - .build()); - responseObserver.onCompleted(); + private ProcDao procDao; + private Whiteboard whiteboard; + private JobManagerSupport jobManagerSupport; + private JobManager jobManager; + private GroupManager groupManager; + private RedirectManager redirectManager; + private ProcSearchFactory procSearchFactory; + + @Override + public void getProcs(ProcGetProcsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(ProcGetProcsResponse.newBuilder() + .setProcs(whiteboard.getProcs(procSearchFactory.create(request.getR()))).build()); + responseObserver.onCompleted(); + } + + @Override + public void unbookProcs(ProcUnbookProcsRequest request, + StreamObserver responseObserver) { + ProcSearchInterface procSearch = procSearchFactory.create(request.getR()); + procSearch.sortByBookedTime(); + responseObserver.onNext(ProcUnbookProcsResponse.newBuilder().setNumProcs(jobManagerSupport + .unbookProcs(procSearch, request.getKill(), new Source(request.toString()))).build()); + responseObserver.onCompleted(); + } + + @Override + public void unbookToGroup(ProcUnbookToGroupRequest request, + StreamObserver responseObserver) { + if (request.getR().getMaxResultsCount() == 0) { + throw new RuntimeException( + "You must specify the number of procs to unbook " + "within the ProcSearchCriteria."); } - @Override - public void unbookProcs(ProcUnbookProcsRequest request, StreamObserver responseObserver) { - ProcSearchInterface procSearch = procSearchFactory.create(request.getR()); - procSearch.sortByBookedTime(); - responseObserver.onNext(ProcUnbookProcsResponse.newBuilder() - .setNumProcs( - jobManagerSupport.unbookProcs( - procSearch, request.getKill(), new Source(request.toString()))) - .build()); - responseObserver.onCompleted(); + GroupInterface g = groupManager.getGroup(request.getGroup().getId()); + List procs = redirectManager.addRedirect(request.getR(), g, request.getKill(), + new Source(request.toString())); + responseObserver + .onNext(ProcUnbookToGroupResponse.newBuilder().setNumProcs(procs.size()).build()); + responseObserver.onCompleted(); + } + + @Override + public void unbookToJob(ProcUnbookToJobRequest request, + StreamObserver responseObserver) { + if (request.getR().getMaxResultsCount() == 0) { + throw new RuntimeException( + "You must specify the number of procs to unbook " + "within the ProcSearchCriteria."); } - @Override - public void unbookToGroup(ProcUnbookToGroupRequest request, StreamObserver responseObserver) { - if (request.getR().getMaxResultsCount() == 0) { - throw new RuntimeException( - "You must specify the number of procs to unbook " + - "within the ProcSearchCriteria."); - } - - GroupInterface g = groupManager.getGroup(request.getGroup().getId()); - List procs = redirectManager.addRedirect(request.getR(), - g, request.getKill(), new Source(request.toString())); - responseObserver.onNext(ProcUnbookToGroupResponse.newBuilder() - 
.setNumProcs(procs.size()) - .build()); - responseObserver.onCompleted(); + List jobs = new ArrayList(request.getJobs().getJobsCount()); + + for (Job job : request.getJobs().getJobsList()) { + try { + jobs.add(jobManager.getJob(job.getId())); + } catch (EmptyResultDataAccessException e) { + // just eat it, just eat it. + // Open up your mouth and feed it. + // Have a banana. Have a whole bunch. + // It doesn't matter, when you had lunch. + // just eat it, just eat it + // get yourself and egg and beat it + } } - @Override - public void unbookToJob(ProcUnbookToJobRequest request, StreamObserver responseObserver) { - if (request.getR().getMaxResultsCount() == 0) { - throw new RuntimeException( - "You must specify the number of procs to unbook " + - "within the ProcSearchCriteria."); - } - - List jobs = new ArrayList(request.getJobs().getJobsCount()); - - for (Job job: request.getJobs().getJobsList()) { - try { - jobs.add(jobManager.getJob(job.getId())); - } - catch (EmptyResultDataAccessException e) { - // just eat it, just eat it. - // Open up your mouth and feed it. - // Have a banana. Have a whole bunch. - // It doesn't matter, when you had lunch. - // just eat it, just eat it - // get yourself and egg and beat it - } - } - - int returnVal; - if (jobs.size() == 0) { - returnVal = 0; - } else { - List procs = redirectManager.addRedirect(request.getR(), - jobs, request.getKill(), new Source(request.toString())); - - returnVal = procs.size(); - } - responseObserver.onNext(ProcUnbookToJobResponse.newBuilder() - .setNumProcs(returnVal) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getFrame(ProcGetFrameRequest request, StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - Frame frame = whiteboard.getFrame(procDao.getCurrentFrameId(proc)); - ProcGetFrameResponse response = ProcGetFrameResponse.newBuilder() - .setFrame(frame) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getHost(ProcGetHostRequest request, StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - ProcGetHostResponse response = ProcGetHostResponse.newBuilder() - .setHost(whiteboard.getHost(proc.getHostId())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getJob(ProcGetJobRequest request, StreamObserver responseObserver) { - try { - VirtualProc proc = getVirtualProc(request.getProc()); - ProcGetJobResponse response = ProcGetJobResponse.newBuilder() - .setJob(whiteboard.getJob(procDao.getCurrentJobId(proc))) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getLayer(ProcGetLayerRequest request, StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - ProcGetLayerResponse response = ProcGetLayerResponse.newBuilder() - .setLayer(whiteboard.getLayer(procDao.getCurrentLayerId(proc))) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void kill(ProcKillRequest request, StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - String message = "Kill Proc on " + proc.getProcId(); - 
jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), - true, new Source(message)); - responseObserver.onNext(ProcKillResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void unbook(ProcUnbookRequest request, StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - procDao.unbookProc(proc); - if (request.getKill()) { - String message = "Kill Proc on " + proc.getProcId(); - jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), - true, new Source(message)); - } - responseObserver.onNext(ProcUnbookResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void redirectToGroup(ProcRedirectToGroupRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - VirtualProc p = procDao.getVirtualProc(proc.getProcId()); - GroupInterface g = groupManager.getGroup(request.getGroupId()); - String message = "redirectToGroup called on " + proc.getProcId() + " with Group " + g.getGroupId(); - boolean value = redirectManager.addRedirect(p, g, request.getKill(), new Source(message)); - responseObserver.onNext(ProcRedirectToGroupResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - @Override - public void redirectToJob(ProcRedirectToJobRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - VirtualProc p = procDao.getVirtualProc(proc.getId()); - JobInterface j = jobManager.getJob(request.getJobId()); - String message = "redirectToJob called on " + proc.getProcId() + " with Job " + j.getJobId(); - boolean value = redirectManager.addRedirect(p, j, request.getKill(), new Source(message)); - responseObserver.onNext(ProcRedirectToJobResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - @Override - public void clearRedirect(ProcClearRedirectRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - procDao.setUnbookState(proc, false); - boolean value = redirectManager.removeRedirect(proc); - responseObserver.onNext(ProcClearRedirectResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + int returnVal; + if (jobs.size() == 0) { + returnVal = 0; + } else { + List procs = redirectManager.addRedirect(request.getR(), jobs, request.getKill(), + new Source(request.toString())); - public Whiteboard getWhiteboard() { - return whiteboard; + returnVal = procs.size(); } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; + responseObserver.onNext(ProcUnbookToJobResponse.newBuilder().setNumProcs(returnVal).build()); + responseObserver.onCompleted(); + } + + @Override + public void getFrame(ProcGetFrameRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + Frame frame = whiteboard.getFrame(procDao.getCurrentFrameId(proc)); + ProcGetFrameResponse 
response = ProcGetFrameResponse.newBuilder().setFrame(frame).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getHost(ProcGetHostRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + ProcGetHostResponse response = + ProcGetHostResponse.newBuilder().setHost(whiteboard.getHost(proc.getHostId())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getJob(ProcGetJobRequest request, + StreamObserver responseObserver) { + try { + VirtualProc proc = getVirtualProc(request.getProc()); + ProcGetJobResponse response = ProcGetJobResponse.newBuilder() + .setJob(whiteboard.getJob(procDao.getCurrentJobId(proc))).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - private VirtualProc getVirtualProc(Proc proc) { - return procDao.getVirtualProc(proc.getId()); - } - - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } - - public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; + } + + @Override + public void getLayer(ProcGetLayerRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + ProcGetLayerResponse response = ProcGetLayerResponse.newBuilder() + .setLayer(whiteboard.getLayer(procDao.getCurrentLayerId(proc))).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void kill(ProcKillRequest request, StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + String message = "Kill Proc on " + proc.getProcId(); + jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), true, + new Source(message)); + responseObserver.onNext(ProcKillResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void unbook(ProcUnbookRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + procDao.unbookProc(proc); + if (request.getKill()) { + String message = "Kill Proc on " + proc.getProcId(); + jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), true, + new Source(message)); } + responseObserver.onNext(ProcUnbookResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void redirectToGroup(ProcRedirectToGroupRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + VirtualProc p = procDao.getVirtualProc(proc.getProcId()); + GroupInterface g = groupManager.getGroup(request.getGroupId()); + String message = + "redirectToGroup called on " + proc.getProcId() + " with Group " + g.getGroupId(); + boolean value = redirectManager.addRedirect(p, g, request.getKill(), new Source(message)); + 
responseObserver.onNext(ProcRedirectToGroupResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + @Override + public void redirectToJob(ProcRedirectToJobRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + VirtualProc p = procDao.getVirtualProc(proc.getId()); + JobInterface j = jobManager.getJob(request.getJobId()); + String message = "redirectToJob called on " + proc.getProcId() + " with Job " + j.getJobId(); + boolean value = redirectManager.addRedirect(p, j, request.getKill(), new Source(message)); + responseObserver.onNext(ProcRedirectToJobResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + @Override + public void clearRedirect(ProcClearRedirectRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + procDao.setUnbookState(proc, false); + boolean value = redirectManager.removeRedirect(proc); + responseObserver.onNext(ProcClearRedirectResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + private VirtualProc getVirtualProc(Proc proc) { + return procDao.getVirtualProc(proc.getId()); + } + + public ProcSearchFactory getProcSearchFactory() { + return procSearchFactory; + } + + public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { + this.procSearchFactory = procSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java index 413f1982c..b35159a3a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import io.grpc.stub.StreamObserver; @@ -30,37 +26,39 @@ import com.imageworks.spcue.grpc.renderpartition.RenderPartitionInterfaceGrpc; import com.imageworks.spcue.service.BookingManager; -public class ManageRenderPartition extends RenderPartitionInterfaceGrpc.RenderPartitionInterfaceImplBase { +public class ManageRenderPartition + extends RenderPartitionInterfaceGrpc.RenderPartitionInterfaceImplBase { - private BookingManager bookingManager; + private BookingManager bookingManager; - @Override - public void delete(RenderPartDeleteRequest request, StreamObserver responseObserver) { - bookingManager.deactivateLocalHostAssignment(getLocalHostAssignment(request.getRenderPartition())); - responseObserver.onNext(RenderPartDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(RenderPartDeleteRequest request, + StreamObserver responseObserver) { + bookingManager + .deactivateLocalHostAssignment(getLocalHostAssignment(request.getRenderPartition())); + responseObserver.onNext(RenderPartDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void setMaxResources(RenderPartSetMaxResourcesRequest request, - StreamObserver responseObserver) { - LocalHostAssignment localJobAssign = getLocalHostAssignment(request.getRenderPartition()); - bookingManager.setMaxResources(localJobAssign, request.getCores(), request.getMemory(), request.getGpus(), request.getGpuMemory()); - responseObserver.onNext(RenderPartSetMaxResourcesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void setMaxResources(RenderPartSetMaxResourcesRequest request, + StreamObserver responseObserver) { + LocalHostAssignment localJobAssign = getLocalHostAssignment(request.getRenderPartition()); + bookingManager.setMaxResources(localJobAssign, request.getCores(), request.getMemory(), + request.getGpus(), request.getGpuMemory()); + responseObserver.onNext(RenderPartSetMaxResourcesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + public BookingManager getBookingManager() { + return bookingManager; + } - public BookingManager getBookingManager() { - return bookingManager; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } - - private LocalHostAssignment getLocalHostAssignment(RenderPartition renderPartition) { - return bookingManager.getLocalHostAssignment(renderPartition.getId()); - } + private LocalHostAssignment getLocalHostAssignment(RenderPartition renderPartition) { + return 
bookingManager.getLocalHostAssignment(renderPartition.getId()); + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java index 8cd9029c8..534b5edca 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servant; import java.util.LinkedHashSet; @@ -44,102 +40,99 @@ public class ManageService extends ServiceInterfaceGrpc.ServiceInterfaceImplBase { - private ServiceManager serviceManager; - private Whiteboard whiteboard; - - @Override - public void createService(ServiceCreateServiceRequest request, - StreamObserver responseObserver) { - ServiceEntity service = new ServiceEntity(); - service.name = request.getData().getName(); - service.minCores = request.getData().getMinCores(); - service.maxCores = request.getData().getMaxCores(); - service.minMemory = request.getData().getMinMemory(); - service.minGpus = request.getData().getMinGpus(); - service.maxGpus = request.getData().getMaxGpus(); - service.minGpuMemory = request.getData().getMinGpuMemory(); - service.tags = Sets.newLinkedHashSet(request.getData().getTagsList()); - service.threadable = request.getData().getThreadable(); - service.timeout = request.getData().getTimeout(); - service.timeout_llu = request.getData().getTimeoutLlu(); - service.minMemoryIncrease = request.getData().getMinMemoryIncrease(); - serviceManager.createService(service); - responseObserver.onNext(ServiceCreateServiceResponse.newBuilder() - .setService(whiteboard.getService(service.getId())) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getDefaultServices(ServiceGetDefaultServicesRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(ServiceGetDefaultServicesResponse.newBuilder() - .setServices(whiteboard.getDefaultServices()) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getService(ServiceGetServiceRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(ServiceGetServiceResponse.newBuilder() - .setService(whiteboard.getService(request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - 
responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void delete(ServiceDeleteRequest request, StreamObserver responseObserver) { - serviceManager.deleteService(toServiceEntity(request.getService())); - responseObserver.onNext(ServiceDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void update(ServiceUpdateRequest request, StreamObserver responseObserver) { - serviceManager.updateService(toServiceEntity(request.getService())); - responseObserver.onNext(ServiceUpdateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public ServiceManager getServiceManager() { - return serviceManager; - } - - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private ServiceEntity toServiceEntity(Service service) { - ServiceEntity entity = new ServiceEntity(); - entity.id = service.getId(); - entity.name = service.getName(); - entity.minCores = service.getMinCores(); - entity.maxCores = service.getMaxCores(); - entity.minMemory = service.getMinMemory(); - entity.minGpus = service.getMinGpus(); - entity.maxGpus = service.getMaxGpus(); - entity.minGpuMemory = service.getMinGpuMemory(); - entity.tags = new LinkedHashSet<> (service.getTagsList()); - entity.threadable = service.getThreadable(); - entity.timeout = service.getTimeout(); - entity.timeout_llu = service.getTimeoutLlu(); - entity.minMemoryIncrease = service.getMinMemoryIncrease(); - return entity; + private ServiceManager serviceManager; + private Whiteboard whiteboard; + + @Override + public void createService(ServiceCreateServiceRequest request, + StreamObserver responseObserver) { + ServiceEntity service = new ServiceEntity(); + service.name = request.getData().getName(); + service.minCores = request.getData().getMinCores(); + service.maxCores = request.getData().getMaxCores(); + service.minMemory = request.getData().getMinMemory(); + service.minGpus = request.getData().getMinGpus(); + service.maxGpus = request.getData().getMaxGpus(); + service.minGpuMemory = request.getData().getMinGpuMemory(); + service.tags = Sets.newLinkedHashSet(request.getData().getTagsList()); + service.threadable = request.getData().getThreadable(); + service.timeout = request.getData().getTimeout(); + service.timeout_llu = request.getData().getTimeoutLlu(); + service.minMemoryIncrease = request.getData().getMinMemoryIncrease(); + serviceManager.createService(service); + responseObserver.onNext(ServiceCreateServiceResponse.newBuilder() + .setService(whiteboard.getService(service.getId())).build()); + responseObserver.onCompleted(); + } + + @Override + public void getDefaultServices(ServiceGetDefaultServicesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(ServiceGetDefaultServicesResponse.newBuilder() + .setServices(whiteboard.getDefaultServices()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getService(ServiceGetServiceRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(ServiceGetServiceResponse.newBuilder() + .setService(whiteboard.getService(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + 
Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } + } + + @Override + public void delete(ServiceDeleteRequest request, + StreamObserver responseObserver) { + serviceManager.deleteService(toServiceEntity(request.getService())); + responseObserver.onNext(ServiceDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void update(ServiceUpdateRequest request, + StreamObserver responseObserver) { + serviceManager.updateService(toServiceEntity(request.getService())); + responseObserver.onNext(ServiceUpdateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public ServiceManager getServiceManager() { + return serviceManager; + } + + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private ServiceEntity toServiceEntity(Service service) { + ServiceEntity entity = new ServiceEntity(); + entity.id = service.getId(); + entity.name = service.getName(); + entity.minCores = service.getMinCores(); + entity.maxCores = service.getMaxCores(); + entity.minMemory = service.getMinMemory(); + entity.minGpus = service.getMinGpus(); + entity.maxGpus = service.getMaxGpus(); + entity.minGpuMemory = service.getMinGpuMemory(); + entity.tags = new LinkedHashSet<>(service.getTagsList()); + entity.threadable = service.getThreadable(); + entity.timeout = service.getTimeout(); + entity.timeout_llu = service.getTimeoutLlu(); + entity.minMemoryIncrease = service.getMinMemoryIncrease(); + return entity; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java index 6d2db02fe..5769c49e5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.servant; import java.util.LinkedHashSet; @@ -32,52 +28,55 @@ import com.imageworks.spcue.grpc.service.ServiceOverrideUpdateResponse; import com.imageworks.spcue.service.ServiceManager; -public class ManageServiceOverride extends ServiceOverrideInterfaceGrpc.ServiceOverrideInterfaceImplBase { +public class ManageServiceOverride + extends ServiceOverrideInterfaceGrpc.ServiceOverrideInterfaceImplBase { - private ServiceManager serviceManager; + private ServiceManager serviceManager; - @Override - public void delete(ServiceOverrideDeleteRequest request, - StreamObserver responseObserver) { - // Passing null on showId as the interface doesn't require a showId in this situation - serviceManager.deleteService(toServiceOverrideEntity(request.getService(), null)); - responseObserver.onNext(ServiceOverrideDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(ServiceOverrideDeleteRequest request, + StreamObserver responseObserver) { + // Passing null on showId as the interface doesn't require a showId in this + // situation + serviceManager.deleteService(toServiceOverrideEntity(request.getService(), null)); + responseObserver.onNext(ServiceOverrideDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void update(ServiceOverrideUpdateRequest request, - StreamObserver responseObserver) { - // Passing null on showId as the interface doesn't require a showId in this situation - serviceManager.updateService(toServiceOverrideEntity(request.getService(), null)); - responseObserver.onNext(ServiceOverrideUpdateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void update(ServiceOverrideUpdateRequest request, + StreamObserver responseObserver) { + // Passing null on showId as the interface doesn't require a showId in this + // situation + serviceManager.updateService(toServiceOverrideEntity(request.getService(), null)); + responseObserver.onNext(ServiceOverrideUpdateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public ServiceManager getServiceManager() { - return serviceManager; - } + public ServiceManager getServiceManager() { + return serviceManager; + } - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } - private ServiceOverrideEntity toServiceOverrideEntity(Service service, String showId){ - ServiceOverrideEntity entity = new ServiceOverrideEntity(); - entity.id = service.getId(); - entity.name = service.getName(); - entity.minCores = service.getMinCores(); - entity.maxCores = service.getMaxCores(); - entity.minMemory = service.getMinMemory(); - entity.minGpus = service.getMinGpus(); - entity.maxGpus = service.getMaxGpus(); - entity.minGpuMemory = service.getMinGpuMemory(); - entity.tags = new LinkedHashSet<>(service.getTagsList()); - entity.threadable = service.getThreadable(); - entity.showId = showId; - entity.timeout = service.getTimeout(); - entity.timeout_llu = service.getTimeoutLlu(); - entity.minMemoryIncrease = service.getMinMemoryIncrease(); - return entity; - } + private ServiceOverrideEntity toServiceOverrideEntity(Service service, String showId) { + ServiceOverrideEntity entity = new ServiceOverrideEntity(); + entity.id = service.getId(); + entity.name = service.getName(); + entity.minCores = service.getMinCores(); + entity.maxCores 
= service.getMaxCores(); + entity.minMemory = service.getMinMemory(); + entity.minGpus = service.getMinGpus(); + entity.maxGpus = service.getMaxGpus(); + entity.minGpuMemory = service.getMinGpuMemory(); + entity.tags = new LinkedHashSet<>(service.getTagsList()); + entity.threadable = service.getThreadable(); + entity.showId = showId; + entity.timeout = service.getTimeout(); + entity.timeout_llu = service.getTimeoutLlu(); + entity.minMemoryIncrease = service.getMinMemoryIncrease(); + return entity; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java index 0c496b25d..d76fc8de3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.servant; @@ -109,397 +107,386 @@ public class ManageShow extends ShowInterfaceGrpc.ShowInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; - private ShowDao showDao; - private DepartmentManager departmentManager; - private FilterManager filterManager; - private OwnerManager ownerManager; - private ServiceManager serviceManager; - private JobSearchFactory jobSearchFactory; - - @Override - public void createShow(ShowCreateShowRequest request, StreamObserver responseObserver) { - try { - ShowEntity show = new ShowEntity(); - show.name = request.getName(); - adminManager.createShow(show); - responseObserver.onNext(ShowCreateShowResponse.newBuilder() - .setShow(whiteboard.getShow(show.getShowId())) - .build()); - responseObserver.onCompleted(); - } catch (Exception e) { - responseObserver.onError(Status.INTERNAL - .withDescription("Show could not be created." 
+ e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void findShow(ShowFindShowRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(ShowFindShowResponse.newBuilder() - .setShow(whiteboard.findShow(request.getName())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void getActiveShows(ShowGetActiveShowsRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(ShowGetActiveShowsResponse.newBuilder() - .setShows(whiteboard.getActiveShows()) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getShows(ShowGetShowsRequest request, StreamObserver responseObserver) { - responseObserver.onNext(ShowGetShowsResponse.newBuilder() - .setShows(whiteboard.getShows()) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getFilters(ShowGetFiltersRequest request, StreamObserver responseObserver) { - FilterSeq filterSeq = whiteboard.getFilters(getShowEntity(request.getShow())); - ShowGetFiltersResponse response = ShowGetFiltersResponse.newBuilder() - .setFilters(filterSeq) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getSubscriptions(ShowGetSubscriptionRequest request, - StreamObserver responseObserver) { - SubscriptionSeq subscriptionSeq = whiteboard.getSubscriptions(getShowEntity(request.getShow())); - ShowGetSubscriptionResponse response = ShowGetSubscriptionResponse.newBuilder() - .setSubscriptions(subscriptionSeq) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getRootGroup(ShowGetRootGroupRequest request, - StreamObserver responseObserver) { - Group rootGroup = whiteboard.getRootGroup(getShowEntity(request.getShow())); - ShowGetRootGroupResponse response = ShowGetRootGroupResponse.newBuilder().setGroup(rootGroup).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void createSubscription(ShowCreateSubscriptionRequest request, - StreamObserver responseObserver) { - AllocationEntity allocationEntity = adminManager.getAllocationDetail(request.getAllocationId()); - SubscriptionInterface s = adminManager.createSubscription( - getShowEntity(request.getShow()), - allocationEntity, - Convert.coresToCoreUnits(request.getSize()), - Convert.coresToCoreUnits(request.getBurst())); - Subscription subscription = whiteboard.getSubscription(s.getSubscriptionId()); - ShowCreateSubscriptionResponse response = ShowCreateSubscriptionResponse.newBuilder() - .setSubscription(subscription) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getGroups(ShowGetGroupsRequest request, StreamObserver responseObserver) { - GroupSeq groupSeq = whiteboard.getGroups(getShowEntity(request.getShow())); - ShowGetGroupsResponse response = ShowGetGroupsResponse.newBuilder().setGroups(groupSeq).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getJobWhiteboard(ShowGetJobWhiteboardRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - ShowGetJobWhiteboardResponse response = ShowGetJobWhiteboardResponse.newBuilder() - 
.setWhiteboard(whiteboard.getJobWhiteboard(show)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getJobs(ShowGetJobsRequest request, StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - JobSeq jobSeq = whiteboard.getJobs(jobSearchFactory.create(show)); - ShowGetJobsResponse response = ShowGetJobsResponse.newBuilder().setJobs(jobSeq).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMaxCores(ShowSetDefaultMaxCoresRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMaxCores(show, Convert.coresToWholeCoreUnits(request.getMaxCores())); - responseObserver.onNext(ShowSetDefaultMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMinCores(ShowSetDefaultMinCoresRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMinCores(show, Convert.coresToWholeCoreUnits(request.getMinCores())); - responseObserver.onNext(ShowSetDefaultMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMaxGpus(ShowSetDefaultMaxGpusRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMaxGpus(show, request.getMaxGpus()); - responseObserver.onNext(ShowSetDefaultMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMinGpus(ShowSetDefaultMinGpusRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMinGpus(show, request.getMinGpus()); - responseObserver.onNext(ShowSetDefaultMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void findFilter(ShowFindFilterRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - ShowFindFilterResponse response = ShowFindFilterResponse.newBuilder() - .setFilter(whiteboard.findFilter(show, request.getName())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void createFilter(ShowCreateFilterRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - FilterEntity filter = new FilterEntity(); - filter.name = request.getName(); - filter.showId = show.id; - filter.type = FilterType.MATCH_ALL; - filter.order = 0; - filterManager.createFilter(filter); - ShowCreateFilterResponse response = ShowCreateFilterResponse.newBuilder() - .setFilter(whiteboard.findFilter(show, request.getName())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getDepartment(ShowGetDepartmentRequest request, - StreamObserver responseObserver) { - ShowGetDepartmentResponse response = ShowGetDepartmentResponse.newBuilder() - .setDepartment(whiteboard.getDepartment(getShowEntity(request.getShow()), request.getDepartment())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getDepartments(ShowGetDepartmentsRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - 
ShowGetDepartmentsResponse response = ShowGetDepartmentsResponse.newBuilder() - .setDepartments(whiteboard.getDepartments(show)) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void enableBooking(ShowEnableBookingRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateBookingEnabled(show, request.getEnabled()); - responseObserver.onNext(ShowEnableBookingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void enableDispatching(ShowEnableDispatchingRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateDispatchingEnabled(show, request.getEnabled()); - responseObserver.onNext(ShowEnableDispatchingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getDeeds(ShowGetDeedsRequest request, StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - responseObserver.onNext(ShowGetDeedsResponse.newBuilder() - .setDeeds(whiteboard.getDeeds(show)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void createOwner(ShowCreateOwnerRequest request, StreamObserver responseObserver) { - ownerManager.createOwner(request.getName(), getShowEntity(request.getShow())); - ShowCreateOwnerResponse response = ShowCreateOwnerResponse.newBuilder() - .setOwner(whiteboard.getOwner(request.getName())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void setActive(ShowSetActiveRequest request, StreamObserver responseObserver) { - adminManager.setShowActive(getShowEntity(request.getShow()), request.getValue()); - responseObserver.onNext(ShowSetActiveResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void createServiceOverride(ShowCreateServiceOverrideRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - Service requestService = request.getService(); - ServiceOverrideEntity service = new ServiceOverrideEntity(); - service.showId = show.getId(); - service.name = requestService.getName(); - service.minCores = requestService.getMinCores(); - service.maxCores = requestService.getMaxCores(); - service.minMemory = requestService.getMinMemory(); - service.minGpus = requestService.getMinGpus(); - service.maxGpus = requestService.getMaxGpus(); - service.minGpuMemory = requestService.getMinGpuMemory(); - service.tags = Sets.newLinkedHashSet(requestService.getTagsList()); - service.threadable = requestService.getThreadable(); - service.minMemoryIncrease = requestService.getMinMemoryIncrease(); - serviceManager.createService(service); - ServiceOverride serviceOverride = whiteboard.getServiceOverride(show, service.name); - responseObserver.onNext(ShowCreateServiceOverrideResponse.newBuilder() - .setServiceOverride(serviceOverride) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getServiceOverrides(ShowGetServiceOverridesRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - responseObserver.onNext(ShowGetServiceOverridesResponse.newBuilder() - .setServiceOverrides(whiteboard.getServiceOverrides(show)) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void delete(ShowDeleteRequest request, StreamObserver responseObserver) { - 
showDao.delete(getShowEntity(request.getShow())); - responseObserver.onNext(ShowDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getServiceOverride(ShowGetServiceOverrideRequest request, - StreamObserver responseObserver) { - ServiceOverride serviceOverride = whiteboard.getServiceOverride( - getShowEntity(request.getShow()), - request.getName()); - responseObserver.onNext(ShowGetServiceOverrideResponse.newBuilder() - .setServiceOverride(serviceOverride) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void setCommentEmail(ShowSetCommentEmailRequest request, StreamObserver responseObserver) { - adminManager.updateShowCommentEmail( - getShowEntity(request.getShow()), - request.getEmail().split(",")); - responseObserver.onNext(ShowSetCommentEmailResponse.newBuilder().build()); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public OwnerManager getOwnerManager() { - return ownerManager; - } - - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } - - public ServiceManager getServiceManager() { - return serviceManager; - } - - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } - - private ShowEntity getShowEntity(Show show) { - return adminManager.getShowEntity(show.getId()); - } - - public JobSearchFactory getJobSearchFactory() { - return jobSearchFactory; - } - - public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { - this.jobSearchFactory = jobSearchFactory; - } + private AdminManager adminManager; + private Whiteboard whiteboard; + private ShowDao showDao; + private DepartmentManager departmentManager; + private FilterManager filterManager; + private OwnerManager ownerManager; + private ServiceManager serviceManager; + private JobSearchFactory jobSearchFactory; + + @Override + public void createShow(ShowCreateShowRequest request, + StreamObserver responseObserver) { + try { + ShowEntity show = new ShowEntity(); + show.name = request.getName(); + adminManager.createShow(show); + responseObserver.onNext(ShowCreateShowResponse.newBuilder() + .setShow(whiteboard.getShow(show.getShowId())).build()); + responseObserver.onCompleted(); + } catch (Exception e) { + responseObserver + .onError(Status.INTERNAL.withDescription("Show could not be created." 
+ e.getMessage()) + .withCause(e).asRuntimeException()); + } + } + + @Override + public void findShow(ShowFindShowRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(ShowFindShowResponse.newBuilder() + .setShow(whiteboard.findShow(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + } + } + + @Override + public void getActiveShows(ShowGetActiveShowsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + ShowGetActiveShowsResponse.newBuilder().setShows(whiteboard.getActiveShows()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getShows(ShowGetShowsRequest request, + StreamObserver responseObserver) { + responseObserver + .onNext(ShowGetShowsResponse.newBuilder().setShows(whiteboard.getShows()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getFilters(ShowGetFiltersRequest request, + StreamObserver responseObserver) { + FilterSeq filterSeq = whiteboard.getFilters(getShowEntity(request.getShow())); + ShowGetFiltersResponse response = + ShowGetFiltersResponse.newBuilder().setFilters(filterSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getSubscriptions(ShowGetSubscriptionRequest request, + StreamObserver responseObserver) { + SubscriptionSeq subscriptionSeq = whiteboard.getSubscriptions(getShowEntity(request.getShow())); + ShowGetSubscriptionResponse response = + ShowGetSubscriptionResponse.newBuilder().setSubscriptions(subscriptionSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getRootGroup(ShowGetRootGroupRequest request, + StreamObserver responseObserver) { + Group rootGroup = whiteboard.getRootGroup(getShowEntity(request.getShow())); + ShowGetRootGroupResponse response = + ShowGetRootGroupResponse.newBuilder().setGroup(rootGroup).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void createSubscription(ShowCreateSubscriptionRequest request, + StreamObserver responseObserver) { + AllocationEntity allocationEntity = adminManager.getAllocationDetail(request.getAllocationId()); + SubscriptionInterface s = adminManager.createSubscription(getShowEntity(request.getShow()), + allocationEntity, Convert.coresToCoreUnits(request.getSize()), + Convert.coresToCoreUnits(request.getBurst())); + Subscription subscription = whiteboard.getSubscription(s.getSubscriptionId()); + ShowCreateSubscriptionResponse response = + ShowCreateSubscriptionResponse.newBuilder().setSubscription(subscription).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getGroups(ShowGetGroupsRequest request, + StreamObserver responseObserver) { + GroupSeq groupSeq = whiteboard.getGroups(getShowEntity(request.getShow())); + ShowGetGroupsResponse response = ShowGetGroupsResponse.newBuilder().setGroups(groupSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getJobWhiteboard(ShowGetJobWhiteboardRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + ShowGetJobWhiteboardResponse response = ShowGetJobWhiteboardResponse.newBuilder() + 
.setWhiteboard(whiteboard.getJobWhiteboard(show)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getJobs(ShowGetJobsRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + JobSeq jobSeq = whiteboard.getJobs(jobSearchFactory.create(show)); + ShowGetJobsResponse response = ShowGetJobsResponse.newBuilder().setJobs(jobSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMaxCores(ShowSetDefaultMaxCoresRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMaxCores(show, Convert.coresToWholeCoreUnits(request.getMaxCores())); + responseObserver.onNext(ShowSetDefaultMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMinCores(ShowSetDefaultMinCoresRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMinCores(show, Convert.coresToWholeCoreUnits(request.getMinCores())); + responseObserver.onNext(ShowSetDefaultMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMaxGpus(ShowSetDefaultMaxGpusRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMaxGpus(show, request.getMaxGpus()); + responseObserver.onNext(ShowSetDefaultMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMinGpus(ShowSetDefaultMinGpusRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMinGpus(show, request.getMinGpus()); + responseObserver.onNext(ShowSetDefaultMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void findFilter(ShowFindFilterRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + ShowFindFilterResponse response = ShowFindFilterResponse.newBuilder() + .setFilter(whiteboard.findFilter(show, request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void createFilter(ShowCreateFilterRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + FilterEntity filter = new FilterEntity(); + filter.name = request.getName(); + filter.showId = show.id; + filter.type = FilterType.MATCH_ALL; + filter.order = 0; + filterManager.createFilter(filter); + ShowCreateFilterResponse response = ShowCreateFilterResponse.newBuilder() + .setFilter(whiteboard.findFilter(show, request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getDepartment(ShowGetDepartmentRequest request, + StreamObserver responseObserver) { + ShowGetDepartmentResponse response = ShowGetDepartmentResponse.newBuilder() + .setDepartment( + whiteboard.getDepartment(getShowEntity(request.getShow()), request.getDepartment())) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getDepartments(ShowGetDepartmentsRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + 
ShowGetDepartmentsResponse response = ShowGetDepartmentsResponse.newBuilder() + .setDepartments(whiteboard.getDepartments(show)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void enableBooking(ShowEnableBookingRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateBookingEnabled(show, request.getEnabled()); + responseObserver.onNext(ShowEnableBookingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void enableDispatching(ShowEnableDispatchingRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateDispatchingEnabled(show, request.getEnabled()); + responseObserver.onNext(ShowEnableDispatchingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getDeeds(ShowGetDeedsRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + responseObserver + .onNext(ShowGetDeedsResponse.newBuilder().setDeeds(whiteboard.getDeeds(show)).build()); + responseObserver.onCompleted(); + } + + @Override + public void createOwner(ShowCreateOwnerRequest request, + StreamObserver responseObserver) { + ownerManager.createOwner(request.getName(), getShowEntity(request.getShow())); + ShowCreateOwnerResponse response = ShowCreateOwnerResponse.newBuilder() + .setOwner(whiteboard.getOwner(request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void setActive(ShowSetActiveRequest request, + StreamObserver responseObserver) { + adminManager.setShowActive(getShowEntity(request.getShow()), request.getValue()); + responseObserver.onNext(ShowSetActiveResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void createServiceOverride(ShowCreateServiceOverrideRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + Service requestService = request.getService(); + ServiceOverrideEntity service = new ServiceOverrideEntity(); + service.showId = show.getId(); + service.name = requestService.getName(); + service.minCores = requestService.getMinCores(); + service.maxCores = requestService.getMaxCores(); + service.minMemory = requestService.getMinMemory(); + service.minGpus = requestService.getMinGpus(); + service.maxGpus = requestService.getMaxGpus(); + service.minGpuMemory = requestService.getMinGpuMemory(); + service.tags = Sets.newLinkedHashSet(requestService.getTagsList()); + service.threadable = requestService.getThreadable(); + service.minMemoryIncrease = requestService.getMinMemoryIncrease(); + serviceManager.createService(service); + ServiceOverride serviceOverride = whiteboard.getServiceOverride(show, service.name); + responseObserver.onNext( + ShowCreateServiceOverrideResponse.newBuilder().setServiceOverride(serviceOverride).build()); + responseObserver.onCompleted(); + } + + @Override + public void getServiceOverrides(ShowGetServiceOverridesRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + responseObserver.onNext(ShowGetServiceOverridesResponse.newBuilder() + .setServiceOverrides(whiteboard.getServiceOverrides(show)).build()); + responseObserver.onCompleted(); + } + + @Override + public void delete(ShowDeleteRequest request, + StreamObserver responseObserver) { + 
showDao.delete(getShowEntity(request.getShow())); + responseObserver.onNext(ShowDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getServiceOverride(ShowGetServiceOverrideRequest request, + StreamObserver responseObserver) { + ServiceOverride serviceOverride = + whiteboard.getServiceOverride(getShowEntity(request.getShow()), request.getName()); + responseObserver.onNext( + ShowGetServiceOverrideResponse.newBuilder().setServiceOverride(serviceOverride).build()); + responseObserver.onCompleted(); + } + + @Override + public void setCommentEmail(ShowSetCommentEmailRequest request, + StreamObserver responseObserver) { + adminManager.updateShowCommentEmail(getShowEntity(request.getShow()), + request.getEmail().split(",")); + responseObserver.onNext(ShowSetCommentEmailResponse.newBuilder().build()); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public OwnerManager getOwnerManager() { + return ownerManager; + } + + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = ownerManager; + } + + public ServiceManager getServiceManager() { + return serviceManager; + } + + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } + + private ShowEntity getShowEntity(Show show) { + return adminManager.getShowEntity(show.getId()); + } + + public JobSearchFactory getJobSearchFactory() { + return jobSearchFactory; + } + + public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { + this.jobSearchFactory = jobSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java index 2f6119d5b..bf907da59 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.servant; import io.grpc.Status; @@ -41,97 +38,89 @@ import com.imageworks.spcue.service.Whiteboard; import com.imageworks.spcue.util.Convert; - public class ManageSubscription extends SubscriptionInterfaceGrpc.SubscriptionInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; - - @Override - public void delete(SubscriptionDeleteRequest request, StreamObserver responseObserver) { - adminManager.deleteSubscription( - getSubscriptionDetail(request.getSubscription()) - ); - responseObserver.onNext(SubscriptionDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); + private AdminManager adminManager; + private Whiteboard whiteboard; + + @Override + public void delete(SubscriptionDeleteRequest request, + StreamObserver responseObserver) { + adminManager.deleteSubscription(getSubscriptionDetail(request.getSubscription())); + responseObserver.onNext(SubscriptionDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void find(SubscriptionFindRequest request, + StreamObserver responseObserver) throws CueGrpcException { + String name = request.getName(); + try { + String[] parts = name.split("\\.", 3); + if (parts.length != 3) { + throw new CueGrpcException("Subscription names must be in the form of alloc.show"); + } + SubscriptionFindResponse response = SubscriptionFindResponse.newBuilder() + .setSubscription(whiteboard.findSubscription(parts[2], parts[0] + "." + parts[1])) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver + .onError(Status.NOT_FOUND.withDescription("A subscription to " + name + " was not found.") + .withCause(e).asRuntimeException()); } - - @Override - public void find(SubscriptionFindRequest request, StreamObserver responseObserver) - throws CueGrpcException{ - String name = request.getName(); - try { - String[] parts = name.split("\\.", 3); - if (parts.length != 3) { - throw new CueGrpcException("Subscription names must be in the form of alloc.show"); - } - SubscriptionFindResponse response = SubscriptionFindResponse.newBuilder() - .setSubscription(whiteboard.findSubscription(parts[2], parts[0] + "." 
+ parts[1])) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription("A subscription to " + name + " was not found.") - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void get(SubscriptionGetRequest request, StreamObserver responseObserver) { - try { - SubscriptionGetResponse response = SubscriptionGetResponse.newBuilder() - .setSubscription(whiteboard.getSubscription(request.getId())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.NOT_FOUND - .withDescription(e.getMessage()) - .withCause(e) - .asRuntimeException()); - } - } - - @Override - public void setBurst(SubscriptionSetBurstRequest request, - StreamObserver responseObserver) { - adminManager.setSubscriptionBurst( - getSubscriptionDetail(request.getSubscription()), - request.getBurst()); - responseObserver.onNext(SubscriptionSetBurstResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setSize(SubscriptionSetSizeRequest request, - StreamObserver responseObserver) { - adminManager.setSubscriptionSize( - getSubscriptionDetail(request.getSubscription()), - request.getNewSize()); - responseObserver.onNext(SubscriptionSetSizeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private SubscriptionEntity getSubscriptionDetail(Subscription subscription) { - return adminManager.getSubscriptionDetail(subscription.getId()); + } + + @Override + public void get(SubscriptionGetRequest request, + StreamObserver responseObserver) { + try { + SubscriptionGetResponse response = SubscriptionGetResponse.newBuilder() + .setSubscription(whiteboard.getSubscription(request.getId())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); } + } + + @Override + public void setBurst(SubscriptionSetBurstRequest request, + StreamObserver responseObserver) { + adminManager.setSubscriptionBurst(getSubscriptionDetail(request.getSubscription()), + request.getBurst()); + responseObserver.onNext(SubscriptionSetBurstResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setSize(SubscriptionSetSizeRequest request, + StreamObserver responseObserver) { + adminManager.setSubscriptionSize(getSubscriptionDetail(request.getSubscription()), + request.getNewSize()); + responseObserver.onNext(SubscriptionSetSizeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private SubscriptionEntity getSubscriptionDetail(Subscription subscription) { + return 
adminManager.getSubscriptionDetail(subscription.getId());
+  }
 }
-
diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java
index e43690829..4c75824e6 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java
@@ -2,20 +2,17 @@
 /*
  * Copyright Contributors to the OpenCue Project
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
  *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * Unless required by applicable law or agreed to in writing, software distributed under the License
+ * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
+ * or implied. See the License for the specific language governing permissions and limitations under
+ * the License.
  */
-
 package com.imageworks.spcue.servant;

 import io.grpc.stub.StreamObserver;
@@ -32,52 +29,47 @@
 import com.imageworks.spcue.service.DepartmentManager;
 import com.imageworks.spcue.util.Convert;

-
 public class ManageTask extends TaskInterfaceGrpc.TaskInterfaceImplBase {
-    private DepartmentManager departmentManager;
+  private DepartmentManager departmentManager;

-    @Override
-    public void delete(TaskDeleteRequest request, StreamObserver<TaskDeleteResponse> responseObserver) {
-        departmentManager.removeTask(
-            getTaskDetail(request.getTask())
-        );
-        TaskDeleteResponse response = TaskDeleteResponse.newBuilder().build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
+  @Override
+  public void delete(TaskDeleteRequest request,
+      StreamObserver<TaskDeleteResponse> responseObserver) {
+    departmentManager.removeTask(getTaskDetail(request.getTask()));
+    TaskDeleteResponse response = TaskDeleteResponse.newBuilder().build();
+    responseObserver.onNext(response);
+    responseObserver.onCompleted();
+  }

-    @Override
-    public void setMinCores(TaskSetMinCoresRequest request, StreamObserver<TaskSetMinCoresResponse> responseObserver) {
-        departmentManager.setMinCores(
-            getTaskDetail(request.getTask()),
-            Convert.coresToWholeCoreUnits(request.getNewMinCores())
-        );
-        TaskSetMinCoresResponse response = TaskSetMinCoresResponse.newBuilder().build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
+  @Override
+  public void setMinCores(TaskSetMinCoresRequest request,
+      StreamObserver<TaskSetMinCoresResponse> responseObserver) {
+    departmentManager.setMinCores(getTaskDetail(request.getTask()),
+        Convert.coresToWholeCoreUnits(request.getNewMinCores()));
+    TaskSetMinCoresResponse response = TaskSetMinCoresResponse.newBuilder().build();
+    responseObserver.onNext(response);
+    responseObserver.onCompleted();
+  }

-    @Override
-    public void clearAdjustments(TaskClearAdjustmentsRequest request,
-        StreamObserver<TaskClearAdjustmentsResponse> responseObserver) {
-        departmentManager.clearTaskAdjustment(
-            getTaskDetail(request.getTask())
-        );
-
TaskClearAdjustmentsResponse response = TaskClearAdjustmentsResponse.newBuilder().build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } + @Override + public void clearAdjustments(TaskClearAdjustmentsRequest request, + StreamObserver responseObserver) { + departmentManager.clearTaskAdjustment(getTaskDetail(request.getTask())); + TaskClearAdjustmentsResponse response = TaskClearAdjustmentsResponse.newBuilder().build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - public DepartmentManager getDepartmentManager() { - return departmentManager; - } + public DepartmentManager getDepartmentManager() { + return departmentManager; + } - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } - private TaskEntity getTaskDetail(Task task) { - return departmentManager.getTaskDetail(task.getName()); - } + private TaskEntity getTaskDetail(Task task) { + return departmentManager.getTaskDetail(task.getName()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java b/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java index 4e7ce7094..e827cc5f6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java @@ -1,5 +1,4 @@ - package com.imageworks.spcue.servant; import io.grpc.stub.StreamObserver; @@ -14,50 +13,50 @@ import com.imageworks.spcue.grpc.report.RqdReportStatusRequest; import com.imageworks.spcue.grpc.report.RqdReportStatusResponse; - public class RqdReportStatic extends RqdReportInterfaceGrpc.RqdReportInterfaceImplBase { - private FrameCompleteHandler frameCompleteHandler; - private HostReportHandler hostReportHandler; - - @SuppressWarnings("unused") - - @Override - public void reportRqdStartup(RqdReportRqdStartupRequest request, - StreamObserver responseObserver) { - hostReportHandler.queueBootReport(request.getBootReport()); - responseObserver.onNext(RqdReportRqdStartupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void reportRunningFrameCompletion(RqdReportRunningFrameCompletionRequest request, - StreamObserver responseObserver) { - frameCompleteHandler.handleFrameCompleteReport(request.getFrameCompleteReport()); - responseObserver.onNext(RqdReportRunningFrameCompletionResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void reportStatus(RqdReportStatusRequest request, StreamObserver responseObserver) { - hostReportHandler.queueHostReport(request.getHostReport()); - responseObserver.onNext(RqdReportStatusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public FrameCompleteHandler getFrameCompleteHandler() { - return frameCompleteHandler; - } - - public void setFrameCompleteHandler(FrameCompleteHandler frameCompleteHandler) { - this.frameCompleteHandler = frameCompleteHandler; - } - - public HostReportHandler getHostReportHandler() { - return hostReportHandler; - } - - public void setHostReportHandler(HostReportHandler hostReportHandler) { - this.hostReportHandler = hostReportHandler; - } + private FrameCompleteHandler frameCompleteHandler; + private HostReportHandler hostReportHandler; + + @SuppressWarnings("unused") + + @Override + public void reportRqdStartup(RqdReportRqdStartupRequest 
request, + StreamObserver responseObserver) { + hostReportHandler.queueBootReport(request.getBootReport()); + responseObserver.onNext(RqdReportRqdStartupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void reportRunningFrameCompletion(RqdReportRunningFrameCompletionRequest request, + StreamObserver responseObserver) { + frameCompleteHandler.handleFrameCompleteReport(request.getFrameCompleteReport()); + responseObserver.onNext(RqdReportRunningFrameCompletionResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void reportStatus(RqdReportStatusRequest request, + StreamObserver responseObserver) { + hostReportHandler.queueHostReport(request.getHostReport()); + responseObserver.onNext(RqdReportStatusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public FrameCompleteHandler getFrameCompleteHandler() { + return frameCompleteHandler; + } + + public void setFrameCompleteHandler(FrameCompleteHandler frameCompleteHandler) { + this.frameCompleteHandler = frameCompleteHandler; + } + + public HostReportHandler getHostReportHandler() { + return hostReportHandler; + } + + public void setHostReportHandler(HostReportHandler hostReportHandler) { + this.hostReportHandler = hostReportHandler; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java index a88c8e87d..331299a38 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.servant; import java.util.ArrayList; @@ -37,40 +33,58 @@ public class ServantUtil { - public static List convertLayerFilterList(LayerSeq layers) { - final List result = new ArrayList(); - for (final Layer layer: layers.getLayersList()) { - final String id = layer.getId(); - result.add(new LayerInterface() { - String _id = id; - public String getLayerId() { return _id; } - public String getJobId() { throw new RuntimeException("not implemented"); } - public String getShowId() { throw new RuntimeException("not implemented"); } - public String getId() { return _id; } - public String getName() { throw new RuntimeException("not implemented"); } - public String getFacilityId() { throw new RuntimeException("not implemented"); } - }); + public static List convertLayerFilterList(LayerSeq layers) { + final List result = new ArrayList(); + for (final Layer layer : layers.getLayersList()) { + final String id = layer.getId(); + result.add(new LayerInterface() { + String _id = id; + + public String getLayerId() { + return _id; } - return result; - } - private static boolean isJobFinished(Environment env, String property, JobManager jobManager, JobInterface job) { - if (env.getProperty(property, String.class) != null && - Objects.equals(env.getProperty(property, String.class), "true")) { - JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); - return jobDetail.state == JobState.FINISHED; + public String getJobId() { + throw new RuntimeException("not implemented"); + } + + public String getShowId() { + throw new RuntimeException("not implemented"); + } + + public String getId() { + return _id; + } + + public String getName() { + throw new RuntimeException("not implemented"); } - return false; - } - public static boolean attemptChange(Environment env, String property, JobManager jobManager, JobInterface job, StreamObserver responseObserver) { - if (ServantUtil.isJobFinished(env, property, jobManager, job)) { - responseObserver.onError(Status.FAILED_PRECONDITION - .withDescription("Finished jobs are readonly") - .asRuntimeException()); - return false; + public String getFacilityId() { + throw new RuntimeException("not implemented"); } - return true; + }); } -} + return result; + } + private static boolean isJobFinished(Environment env, String property, JobManager jobManager, + JobInterface job) { + if (env.getProperty(property, String.class) != null + && Objects.equals(env.getProperty(property, String.class), "true")) { + JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); + return jobDetail.state == JobState.FINISHED; + } + return false; + } + + public static boolean attemptChange(Environment env, String property, JobManager jobManager, + JobInterface job, StreamObserver responseObserver) { + if (ServantUtil.isJobFinished(env, property, jobManager, job)) { + responseObserver.onError(Status.FAILED_PRECONDITION + .withDescription("Finished jobs are readonly").asRuntimeException()); + return false; + } + return true; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java index 755b39317..306518c6f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import com.imageworks.spcue.AllocationEntity; @@ -31,66 +27,99 @@ public interface AdminManager { - /* - * Shows - */ - boolean showExists(String name); - void createShow(ShowEntity show); - ShowEntity findShowEntity(String name); - ShowEntity getShowEntity(String id); - void setShowActive(ShowInterface show, boolean value); - void updateShowCommentEmail(ShowInterface s, String[] emails); - void updateShowsStatus(); - - /* - * Facilities - */ - FacilityInterface createFacility(String name); - void deleteFacility(FacilityInterface facility); - void setFacilityName(FacilityInterface facility, String name); - FacilityInterface getFacility(String id); - FacilityInterface getDefaultFacility(); - /* - * Allocations - */ - void createAllocation(FacilityInterface facility, AllocationEntity alloc); - void deleteAllocation(AllocationInterface alloc); - void setAllocationName(AllocationInterface a, String name); - void setAllocationTag(AllocationInterface a, String tag); - AllocationEntity getDefaultAllocation(); - void setDefaultAllocation(AllocationInterface a); - AllocationEntity findAllocationDetail(String facility, String name); - AllocationEntity getAllocationDetail(String id); - void setAllocationBillable(AllocationInterface alloc, boolean value); - - /* - * Subscriptions - */ - SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, int size, int burst); - SubscriptionInterface createSubscription(SubscriptionEntity sub); - void deleteSubscription(SubscriptionInterface sub); - void setSubscriptionBurst(SubscriptionInterface sub, int burst); - void setSubscriptionSize(SubscriptionInterface sub, int size); - SubscriptionEntity getSubscriptionDetail(String id); - - /* - * Departments - */ - DepartmentInterface findDepartment(String name); - DepartmentInterface getDefaultDepartment(); - DepartmentInterface getDepartment(DepartmentInterface d); - DepartmentInterface createDepartment(String name); - void removeDepartment(DepartmentInterface d); - - /* - * Limits - */ - String createLimit(String name, int maxValue); - void deleteLimit(LimitInterface limit); - LimitInterface findLimit(String name); - LimitInterface getLimit(String id); - void setLimitName(LimitInterface limit, String name); - void setLimitMaxValue(LimitInterface limit, int maxValue); + /* + * Shows + */ + boolean showExists(String name); -} + void createShow(ShowEntity show); + + ShowEntity findShowEntity(String name); + + ShowEntity getShowEntity(String id); + + void setShowActive(ShowInterface show, boolean 
value); + + void updateShowCommentEmail(ShowInterface s, String[] emails); + + void updateShowsStatus(); + + /* + * Facilities + */ + FacilityInterface createFacility(String name); + + void deleteFacility(FacilityInterface facility); + + void setFacilityName(FacilityInterface facility, String name); + + FacilityInterface getFacility(String id); + + FacilityInterface getDefaultFacility(); + + /* + * Allocations + */ + void createAllocation(FacilityInterface facility, AllocationEntity alloc); + + void deleteAllocation(AllocationInterface alloc); + + void setAllocationName(AllocationInterface a, String name); + + void setAllocationTag(AllocationInterface a, String tag); + + AllocationEntity getDefaultAllocation(); + + void setDefaultAllocation(AllocationInterface a); + + AllocationEntity findAllocationDetail(String facility, String name); + AllocationEntity getAllocationDetail(String id); + + void setAllocationBillable(AllocationInterface alloc, boolean value); + + /* + * Subscriptions + */ + SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, int size, + int burst); + + SubscriptionInterface createSubscription(SubscriptionEntity sub); + + void deleteSubscription(SubscriptionInterface sub); + + void setSubscriptionBurst(SubscriptionInterface sub, int burst); + + void setSubscriptionSize(SubscriptionInterface sub, int size); + + SubscriptionEntity getSubscriptionDetail(String id); + + /* + * Departments + */ + DepartmentInterface findDepartment(String name); + + DepartmentInterface getDefaultDepartment(); + + DepartmentInterface getDepartment(DepartmentInterface d); + + DepartmentInterface createDepartment(String name); + + void removeDepartment(DepartmentInterface d); + + /* + * Limits + */ + String createLimit(String name, int maxValue); + + void deleteLimit(LimitInterface limit); + + LimitInterface findLimit(String name); + + LimitInterface getLimit(String id); + + void setLimitName(LimitInterface limit, String name); + + void setLimitMaxValue(LimitInterface limit, int maxValue); + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java index 8f1f66133..30d67f9af 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import org.apache.logging.log4j.Logger; @@ -46,293 +42,292 @@ @Transactional public class AdminManagerService implements AdminManager { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(AdminManagerService.class); + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(AdminManagerService.class); - private ShowDao showDao; + private ShowDao showDao; - private AllocationDao allocationDao; - - private SubscriptionDao subscriptionDao; - - private DepartmentDao departmentDao; - - private FacilityDao facilityDao; - - private GroupManager groupManager; - - private LimitDao limitDao; - - public void setShowActive(ShowInterface show, boolean value) { - showDao.updateActive(show, value); - } - - public boolean showExists(String name) { - return showDao.showExists(name); - } - - public void createShow(ShowEntity show) { - - show.name = JobSpec.conformShowName(show.name); - - DepartmentInterface dept = getDefaultDepartment(); - showDao.insertShow(show); - - /* - * This is for the show's default group - */ - GroupDetail newGroup = new GroupDetail(); - newGroup.name = show.getName(); - newGroup.parentId = null; - newGroup.showId = show.getShowId(); - newGroup.deptId = dept.getId(); - groupManager.createGroup(newGroup, null); - } - - @Override - public void createAllocation(FacilityInterface facility, AllocationEntity alloc) { - allocationDao.insertAllocation(facility, alloc); - } - - public void deleteAllocation(AllocationInterface alloc) { - allocationDao.deleteAllocation(alloc); - } - - public void setAllocationName(AllocationInterface a, String name) { - allocationDao.updateAllocationName(a, name); - } - - @Transactional(propagation = Propagation.NEVER) - public void setAllocationTag(AllocationInterface a, String tag) { - allocationDao.updateAllocationTag(a, tag); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public AllocationEntity getDefaultAllocation() { - return allocationDao.getDefaultAllocationEntity(); - } - - @Override - public void setDefaultAllocation(AllocationInterface a) { - allocationDao.setDefaultAllocation(a); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ShowEntity findShowEntity(String name) { - return showDao.findShowDetail(name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ShowEntity getShowEntity(String id) { - return showDao.getShowDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateShowCommentEmail(ShowInterface s, String[] emails) { - showDao.updateShowCommentEmail(s, emails); - } - - @Override - public void updateShowsStatus() { - showDao.updateShowsStatus(); - } - - public SubscriptionInterface createSubscription(SubscriptionEntity sub) { - subscriptionDao.insertSubscription(sub); - return sub; - } - - public SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, - int size, int burst) { - SubscriptionEntity s = new SubscriptionEntity(); - s.size = size; - s.burst = burst; - s.showId = show.getShowId(); - s.allocationId = alloc.getAllocationId(); - subscriptionDao.insertSubscription(s); - return s; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public AllocationEntity 
findAllocationDetail(String facility, String name) { - return allocationDao.findAllocationEntity(facility, name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public AllocationEntity getAllocationDetail(String id) { - return allocationDao.getAllocationEntity(id); - } - - public void deleteSubscription(SubscriptionInterface sub) { - subscriptionDao.deleteSubscription(sub); - } - - public void setSubscriptionBurst(SubscriptionInterface sub, int burst) { - subscriptionDao.updateSubscriptionBurst(sub, burst); - } - - public void setSubscriptionSize(SubscriptionInterface sub, int size) { - subscriptionDao.updateSubscriptionSize(sub, size); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public SubscriptionEntity getSubscriptionDetail(String id) { - return subscriptionDao.getSubscriptionDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DepartmentInterface findDepartment(String name) { - return departmentDao.findDepartment(name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DepartmentInterface getDefaultDepartment() { - return departmentDao.getDefaultDepartment(); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DepartmentInterface getDepartment(DepartmentInterface d) { - return departmentDao.getDepartment(d.getDepartmentId()); - } - - @Override - public DepartmentInterface createDepartment(String name) { - departmentDao.insertDepartment(name); - return findDepartment(name); - } - - @Override - public void removeDepartment(DepartmentInterface d) { - departmentDao.deleteDepartment(d); - } - - @Override - public FacilityInterface createFacility(String name) { - FacilityEntity facility = new FacilityEntity(); - facility.name = name; - return facilityDao.insertFacility(facility); - } - - @Override - public void deleteFacility(FacilityInterface facility) { - facilityDao.deleteFacility(facility); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FacilityInterface getFacility(String id) { - return facilityDao.getFacility(id); - } - - @Override - public void setFacilityName(FacilityInterface facility, String name) { - facilityDao.updateFacilityName(facility, name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FacilityInterface getDefaultFacility() { - return facilityDao.getDefaultFacility(); - } - - @Override - public void setAllocationBillable(AllocationInterface alloc, boolean value) { - allocationDao.updateAllocationBillable(alloc, value); - } - - @Override - public String createLimit(String name, int maxValue) { - return limitDao.createLimit(name, maxValue); - } - - public void deleteLimit(LimitInterface limit) { - limitDao.deleteLimit(limit); - } - - @Override - public LimitInterface findLimit(String name) { - return limitDao.findLimit(name); - } - - @Override - public LimitInterface getLimit(String id){ - return limitDao.getLimit(id); - } - - @Override - public void setLimitName(LimitInterface limit, String name){ - limitDao.setLimitName(limit, name); - } - - @Override - public void setLimitMaxValue(LimitInterface limit, int maxValue) { - limitDao.setMaxValue(limit, maxValue); - } - - public AllocationDao getAllocationDao() { - return allocationDao; - } - - public void setAllocationDao(AllocationDao allocationDao) { - this.allocationDao = allocationDao; - } - - public 
ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public SubscriptionDao getSubscriptionDao() { - return subscriptionDao; - } - - public void setSubscriptionDao(SubscriptionDao subscriptionDao) { - this.subscriptionDao = subscriptionDao; - } - - public DepartmentDao getDepartmentDao() { - return departmentDao; - } - - public void setDepartmentDao(DepartmentDao departmentDao) { - this.departmentDao = departmentDao; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public FacilityDao getFacilityDao() { - return facilityDao; - } - - public void setFacilityDao(FacilityDao facilityDao) { - this.facilityDao = facilityDao; - } - - public LimitDao getLimitDao() { - return limitDao; - } - - public void setLimitDao(LimitDao limitDao) { - this.limitDao = limitDao; - } + private AllocationDao allocationDao; + + private SubscriptionDao subscriptionDao; + + private DepartmentDao departmentDao; + + private FacilityDao facilityDao; + + private GroupManager groupManager; + + private LimitDao limitDao; + + public void setShowActive(ShowInterface show, boolean value) { + showDao.updateActive(show, value); + } + + public boolean showExists(String name) { + return showDao.showExists(name); + } + + public void createShow(ShowEntity show) { + + show.name = JobSpec.conformShowName(show.name); + + DepartmentInterface dept = getDefaultDepartment(); + showDao.insertShow(show); + + /* + * This is for the show's default group + */ + GroupDetail newGroup = new GroupDetail(); + newGroup.name = show.getName(); + newGroup.parentId = null; + newGroup.showId = show.getShowId(); + newGroup.deptId = dept.getId(); + groupManager.createGroup(newGroup, null); + } + + @Override + public void createAllocation(FacilityInterface facility, AllocationEntity alloc) { + allocationDao.insertAllocation(facility, alloc); + } + + public void deleteAllocation(AllocationInterface alloc) { + allocationDao.deleteAllocation(alloc); + } + + public void setAllocationName(AllocationInterface a, String name) { + allocationDao.updateAllocationName(a, name); + } + + @Transactional(propagation = Propagation.NEVER) + public void setAllocationTag(AllocationInterface a, String tag) { + allocationDao.updateAllocationTag(a, tag); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public AllocationEntity getDefaultAllocation() { + return allocationDao.getDefaultAllocationEntity(); + } + + @Override + public void setDefaultAllocation(AllocationInterface a) { + allocationDao.setDefaultAllocation(a); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ShowEntity findShowEntity(String name) { + return showDao.findShowDetail(name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ShowEntity getShowEntity(String id) { + return showDao.getShowDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void updateShowCommentEmail(ShowInterface s, String[] emails) { + showDao.updateShowCommentEmail(s, emails); + } + + @Override + public void updateShowsStatus() { + showDao.updateShowsStatus(); + } + + public SubscriptionInterface createSubscription(SubscriptionEntity sub) { + subscriptionDao.insertSubscription(sub); + return sub; + } + + public SubscriptionInterface createSubscription(ShowInterface show, 
AllocationInterface alloc, + int size, int burst) { + SubscriptionEntity s = new SubscriptionEntity(); + s.size = size; + s.burst = burst; + s.showId = show.getShowId(); + s.allocationId = alloc.getAllocationId(); + subscriptionDao.insertSubscription(s); + return s; + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public AllocationEntity findAllocationDetail(String facility, String name) { + return allocationDao.findAllocationEntity(facility, name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public AllocationEntity getAllocationDetail(String id) { + return allocationDao.getAllocationEntity(id); + } + + public void deleteSubscription(SubscriptionInterface sub) { + subscriptionDao.deleteSubscription(sub); + } + + public void setSubscriptionBurst(SubscriptionInterface sub, int burst) { + subscriptionDao.updateSubscriptionBurst(sub, burst); + } + + public void setSubscriptionSize(SubscriptionInterface sub, int size) { + subscriptionDao.updateSubscriptionSize(sub, size); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public SubscriptionEntity getSubscriptionDetail(String id) { + return subscriptionDao.getSubscriptionDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DepartmentInterface findDepartment(String name) { + return departmentDao.findDepartment(name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DepartmentInterface getDefaultDepartment() { + return departmentDao.getDefaultDepartment(); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DepartmentInterface getDepartment(DepartmentInterface d) { + return departmentDao.getDepartment(d.getDepartmentId()); + } + + @Override + public DepartmentInterface createDepartment(String name) { + departmentDao.insertDepartment(name); + return findDepartment(name); + } + + @Override + public void removeDepartment(DepartmentInterface d) { + departmentDao.deleteDepartment(d); + } + + @Override + public FacilityInterface createFacility(String name) { + FacilityEntity facility = new FacilityEntity(); + facility.name = name; + return facilityDao.insertFacility(facility); + } + + @Override + public void deleteFacility(FacilityInterface facility) { + facilityDao.deleteFacility(facility); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FacilityInterface getFacility(String id) { + return facilityDao.getFacility(id); + } + + @Override + public void setFacilityName(FacilityInterface facility, String name) { + facilityDao.updateFacilityName(facility, name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FacilityInterface getDefaultFacility() { + return facilityDao.getDefaultFacility(); + } + + @Override + public void setAllocationBillable(AllocationInterface alloc, boolean value) { + allocationDao.updateAllocationBillable(alloc, value); + } + + @Override + public String createLimit(String name, int maxValue) { + return limitDao.createLimit(name, maxValue); + } + + public void deleteLimit(LimitInterface limit) { + limitDao.deleteLimit(limit); + } + + @Override + public LimitInterface findLimit(String name) { + return limitDao.findLimit(name); + } + + @Override + public LimitInterface getLimit(String id) { + return limitDao.getLimit(id); + } + + @Override + public void 
setLimitName(LimitInterface limit, String name) { + limitDao.setLimitName(limit, name); + } + + @Override + public void setLimitMaxValue(LimitInterface limit, int maxValue) { + limitDao.setMaxValue(limit, maxValue); + } + + public AllocationDao getAllocationDao() { + return allocationDao; + } + + public void setAllocationDao(AllocationDao allocationDao) { + this.allocationDao = allocationDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public SubscriptionDao getSubscriptionDao() { + return subscriptionDao; + } + + public void setSubscriptionDao(SubscriptionDao subscriptionDao) { + this.subscriptionDao = subscriptionDao; + } + + public DepartmentDao getDepartmentDao() { + return departmentDao; + } + + public void setDepartmentDao(DepartmentDao departmentDao) { + this.departmentDao = departmentDao; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public FacilityDao getFacilityDao() { + return facilityDao; + } + + public void setFacilityDao(FacilityDao facilityDao) { + this.facilityDao = facilityDao; + } + + public LimitDao getLimitDao() { + return limitDao; + } + + public void setLimitDao(LimitDao limitDao) { + this.limitDao = limitDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java index b5cfb8455..aff143fce 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -30,116 +26,115 @@ public interface BookingManager { - /** - * Return an active LocalHostAssignment for the given host. - * - * @param host - * @return - */ - public List getLocalHostAssignment(HostInterface host); - - /** - * Return an active LocalHostAssignment for the given unique ID. - * - * @param id - * @return - */ - public LocalHostAssignment getLocalHostAssignment(String id); - - /** - * Return an active LocalHostAssignment for the given job ID and host ID. 
- * - * @param hostId - * @param jobId - * @return - */ - public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId); - - /** - * Create a local host assignment for the given job. - * - * @param host - * @param job - * @param lja - */ - public void createLocalHostAssignment(DispatchHost host, - JobInterface job, LocalHostAssignment lja); - - /** - * Create a local host assignment for the given layer. - * - * @param host - * @param layer - * @param lja - */ - public void createLocalHostAssignment(DispatchHost host, - LayerInterface layer, LocalHostAssignment lja); - - /** - * Create a local host assignment for the given frame. - * - * @param host - * @param frame - * @param lja - */ - public void createLocalHostAssignment(DispatchHost host, - FrameInterface frame, LocalHostAssignment lja); - - /** - * Return true if the host as a local assignment. - * - * @param host - * @return - */ - public boolean hasLocalHostAssignment(HostInterface host); - - /** - * Return true if the given host has active local frames. - * - * @param host - * @return - */ - public boolean hasActiveLocalFrames(HostInterface host); - - /** - * Remove the given LocalHostAssignment. - * - * @param lha - */ - void removeLocalHostAssignment(LocalHostAssignment lha); - - /** - * Deactivate the the given LocalHostAssignment. Deactivated entries - * will not book procs. - * - * @param lha - */ - void deactivateLocalHostAssignment(LocalHostAssignment lha); - - /** - * Set the max resource usage for the given LocalHostAssignment. - * - * @param l - * @param maxCoreUnits - * @param maxMemory - * @param maxGpuUnits - * @param maxGpuMemory - */ - void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, int maxGpuUnits, long maxGpuMemory); - - /** - * Remove a LocalHostAssignment if there are no procs assigned to it. - * - * @param lha - */ - void removeInactiveLocalHostAssignment(LocalHostAssignment lha); - - /** - * Return true if the host is running more cores than the maximum allowed. - * - * @param host - * @return - */ - boolean hasResourceDeficit(HostInterface host); + /** + * Return an active LocalHostAssignment for the given host. + * + * @param host + * @return + */ + public List getLocalHostAssignment(HostInterface host); + + /** + * Return an active LocalHostAssignment for the given unique ID. + * + * @param id + * @return + */ + public LocalHostAssignment getLocalHostAssignment(String id); + + /** + * Return an active LocalHostAssignment for the given job ID and host ID. + * + * @param hostId + * @param jobId + * @return + */ + public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId); + + /** + * Create a local host assignment for the given job. + * + * @param host + * @param job + * @param lja + */ + public void createLocalHostAssignment(DispatchHost host, JobInterface job, + LocalHostAssignment lja); + + /** + * Create a local host assignment for the given layer. + * + * @param host + * @param layer + * @param lja + */ + public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, + LocalHostAssignment lja); + + /** + * Create a local host assignment for the given frame. + * + * @param host + * @param frame + * @param lja + */ + public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, + LocalHostAssignment lja); + + /** + * Return true if the host as a local assignment. 
+ * + * @param host + * @return + */ + public boolean hasLocalHostAssignment(HostInterface host); + + /** + * Return true if the given host has active local frames. + * + * @param host + * @return + */ + public boolean hasActiveLocalFrames(HostInterface host); + + /** + * Remove the given LocalHostAssignment. + * + * @param lha + */ + void removeLocalHostAssignment(LocalHostAssignment lha); + + /** + * Deactivate the the given LocalHostAssignment. Deactivated entries will not book procs. + * + * @param lha + */ + void deactivateLocalHostAssignment(LocalHostAssignment lha); + + /** + * Set the max resource usage for the given LocalHostAssignment. + * + * @param l + * @param maxCoreUnits + * @param maxMemory + * @param maxGpuUnits + * @param maxGpuMemory + */ + void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, int maxGpuUnits, + long maxGpuMemory); + + /** + * Remove a LocalHostAssignment if there are no procs assigned to it. + * + * @param lha + */ + void removeInactiveLocalHostAssignment(LocalHostAssignment lha); + + /** + * Return true if the host is running more cores than the maximum allowed. + * + * @param host + * @return + */ + boolean hasResourceDeficit(HostInterface host); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java index 1322b622d..d3d1848e2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.service; import java.util.List; @@ -44,207 +40,202 @@ import com.imageworks.spcue.dispatcher.Dispatcher; import com.imageworks.spcue.grpc.job.JobState; - @Transactional public class BookingManagerService implements BookingManager { - @SuppressWarnings("unused") - private static final Logger logger = - LogManager.getLogger(BookingManagerService.class); - - private BookingQueue bookingQueue; - private BookingDao bookingDao; - private Dispatcher localDispatcher; - private JobManager jobManager; - private JobManagerSupport jobManagerSupport; - private JobDao jobDao; - private HostDao hostDao; - private ProcDao procDao; - - @Override - public boolean hasLocalHostAssignment(HostInterface host) { - return bookingDao.hasLocalJob(host); - } - - @Override - public boolean hasActiveLocalFrames(HostInterface host) { - return bookingDao.hasActiveLocalJob(host); - } - - @Override - public void setMaxResources(LocalHostAssignment l, int maxCoreUnits, - long maxMemory, int maxGpuUnits, long maxGpuMemory) { - - HostInterface host = hostDao.getHost(l.getHostId()); - - if (maxCoreUnits > 0) { - bookingDao.updateMaxCores(l, maxCoreUnits); - } - - if (maxMemory > 0) { - bookingDao.updateMaxMemory(l, maxMemory); - } - - if (maxGpuUnits > 0) { - bookingDao.updateMaxGpus(l, maxGpuUnits); - } - - if (maxGpuMemory > 0) { - bookingDao.updateMaxGpuMemory(l, maxGpuMemory); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void removeInactiveLocalHostAssignment(LocalHostAssignment lha) { - String jobId = lha.getJobId(); - try { - JobDetail jobDetail = jobDao.getJobDetail(jobId); - if (jobManager.isJobComplete(jobDetail) || jobDetail.state.equals(JobState.FINISHED)) { - removeLocalHostAssignment(lha); - } - } - catch (EmptyResultDataAccessException e) { - removeLocalHostAssignment(lha); - } - } + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(BookingManagerService.class); - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void removeLocalHostAssignment(LocalHostAssignment l) { + private BookingQueue bookingQueue; + private BookingDao bookingDao; + private Dispatcher localDispatcher; + private JobManager jobManager; + private JobManagerSupport jobManagerSupport; + private JobDao jobDao; + private HostDao hostDao; + private ProcDao procDao; - LocalHostAssignment lja = bookingDao.getLocalJobAssignment(l.id); - HostInterface host = hostDao.getHost(l.getHostId()); + @Override + public boolean hasLocalHostAssignment(HostInterface host) { + return bookingDao.hasLocalJob(host); + } - bookingDao.deleteLocalJobAssignment(lja); - } + @Override + public boolean hasActiveLocalFrames(HostInterface host) { + return bookingDao.hasActiveLocalJob(host); + } - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void deactivateLocalHostAssignment(LocalHostAssignment l) { - - /* - * De-activate the local booking and unbook procs. - * The last proc to report in should remove the LHA. 
- */ - bookingDao.deactivate(l); - - List procs = procDao.findVirtualProcs(l); - for (VirtualProc p: procs) { - jobManagerSupport.unbookProc(p, true, new - Source("user cleared local jobs")); - } - removeLocalHostAssignment(l); - } + @Override + public void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, + int maxGpuUnits, long maxGpuMemory) { - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getLocalHostAssignment(HostInterface host) { - return bookingDao.getLocalJobAssignment(host); - } + HostInterface host = hostDao.getHost(l.getHostId()); - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public LocalHostAssignment getLocalHostAssignment(String id) { - return bookingDao.getLocalJobAssignment(id); + if (maxCoreUnits > 0) { + bookingDao.updateMaxCores(l, maxCoreUnits); } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId) { - return bookingDao.getLocalJobAssignment(hostId, jobId); + if (maxMemory > 0) { + bookingDao.updateMaxMemory(l, maxMemory); } - /** - * Create LocalHostAssignments - */ - - @Override - public void createLocalHostAssignment(DispatchHost host, JobInterface job, - LocalHostAssignment lja) { - bookingDao.insertLocalHostAssignment(host, job, lja); + if (maxGpuUnits > 0) { + bookingDao.updateMaxGpus(l, maxGpuUnits); } - @Override - public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, - LocalHostAssignment lja) { - bookingDao.insertLocalHostAssignment(host, layer, lja); + if (maxGpuMemory > 0) { + bookingDao.updateMaxGpuMemory(l, maxGpuMemory); } + } - @Override - public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, - LocalHostAssignment lja) { - bookingDao.insertLocalHostAssignment(host, frame, lja); + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void removeInactiveLocalHostAssignment(LocalHostAssignment lha) { + String jobId = lha.getJobId(); + try { + JobDetail jobDetail = jobDao.getJobDetail(jobId); + if (jobManager.isJobComplete(jobDetail) || jobDetail.state.equals(JobState.FINISHED)) { + removeLocalHostAssignment(lha); + } + } catch (EmptyResultDataAccessException e) { + removeLocalHostAssignment(lha); } + } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean hasResourceDeficit(HostInterface host) { - return bookingDao.hasResourceDeficit(host); - } - - public BookingQueue getBookingQueue() { - return bookingQueue; - } + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void removeLocalHostAssignment(LocalHostAssignment l) { - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } + LocalHostAssignment lja = bookingDao.getLocalJobAssignment(l.id); + HostInterface host = hostDao.getHost(l.getHostId()); - public BookingDao getBookingDao() { - return bookingDao; - } + bookingDao.deleteLocalJobAssignment(lja); + } - public void setBookingDao(BookingDao bookingDao) { - this.bookingDao = bookingDao; - } + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void deactivateLocalHostAssignment(LocalHostAssignment l) { - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } - - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } - - public JobManagerSupport getJobManagerSupport() { - return 
jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + /* + * De-activate the local booking and unbook procs. The last proc to report in should remove the + * LHA. + */ + bookingDao.deactivate(l); + + List procs = procDao.findVirtualProcs(l); + for (VirtualProc p : procs) { + jobManagerSupport.unbookProc(p, true, new Source("user cleared local jobs")); + } + removeLocalHostAssignment(l); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getLocalHostAssignment(HostInterface host) { + return bookingDao.getLocalJobAssignment(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LocalHostAssignment getLocalHostAssignment(String id) { + return bookingDao.getLocalJobAssignment(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId) { + return bookingDao.getLocalJobAssignment(hostId, jobId); + } + + /** + * Create LocalHostAssignments + */ + + @Override + public void createLocalHostAssignment(DispatchHost host, JobInterface job, + LocalHostAssignment lja) { + bookingDao.insertLocalHostAssignment(host, job, lja); + } + + @Override + public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, + LocalHostAssignment lja) { + bookingDao.insertLocalHostAssignment(host, layer, lja); + } + + @Override + public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, + LocalHostAssignment lja) { + bookingDao.insertLocalHostAssignment(host, frame, lja); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean hasResourceDeficit(HostInterface host) { + return bookingDao.hasResourceDeficit(host); + } + + public BookingQueue getBookingQueue() { + return bookingQueue; + } + + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public BookingDao getBookingDao() { + return bookingDao; + } + + public void setBookingDao(BookingDao bookingDao) { + this.bookingDao = bookingDao; + } + + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } + + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) 
{ + this.hostDao = hostDao; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java index faee9dff9..1e185de9a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import com.imageworks.spcue.CommentDetail; @@ -27,68 +23,68 @@ public interface CommentManager { - /** - * Add a comment to a job. - * - * @param job - * @param comment - */ - public void addComment(JobInterface job, CommentDetail comment); - - /** - * Add a comment to a host - * - * @param host - * @param comment - */ - public void addComment(HostInterface host, CommentDetail comment); - - /** - * - * @param id - */ - public void deleteComment(String id); - - /** - * Deletes comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return boolean: returns true if one or more comments where deleted - */ - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); - - /** - * Get comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return List - */ - public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject); - - /** - * - * @param id - * @param message - */ - public void setCommentMessage(String id, String message); - - /** - * - * @param id - * @param subject - */ - public void setCommentSubject(String id, String subject); - - /** - * Save the specified comment - * - * @param detail - */ - public void saveComment(CommentDetail detail); + /** + * Add a comment to a job. 
+ * + * @param job + * @param comment + */ + public void addComment(JobInterface job, CommentDetail comment); + + /** + * Add a comment to a host + * + * @param host + * @param comment + */ + public void addComment(HostInterface host, CommentDetail comment); + + /** + * + * @param id + */ + public void deleteComment(String id); + + /** + * Deletes comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return boolean: returns true if one or more comments where deleted + */ + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); + + /** + * Get comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return List + */ + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject); + + /** + * + * @param id + * @param message + */ + public void setCommentMessage(String id, String message); + + /** + * + * @param id + * @param subject + */ + public void setCommentSubject(String id, String subject); + + /** + * Save the specified comment + * + * @param detail + */ + public void saveComment(CommentDetail detail); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java index b6d4430ec..9cbd2ba6e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
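// Sketch of how a caller might use the CommentManager interface above. The
// CommentDetail no-arg constructor and its public user/subject/message fields
// are assumptions; the manager methods are the ones declared above, and the
// sketch is assumed to sit in com.imageworks.spcue.service alongside them.
import com.imageworks.spcue.CommentDetail;
import com.imageworks.spcue.HostInterface;

class CommentSketch {
  private CommentManager commentManager;

  void flagHost(HostInterface host, String user) {
    CommentDetail comment = new CommentDetail(); // constructor assumed
    comment.user = user;                         // public fields assumed
    comment.subject = "hardware check";
    comment.message = "Pulled from the farm pending a memory test.";
    commentManager.addComment(host, comment);

    // Later, drop the note again; returns true if at least one comment matched.
    boolean removed =
        commentManager.deleteCommentByHostUserAndSubject(host, user, "hardware check");
  }
}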
*/ - - package com.imageworks.spcue.service; import org.springframework.transaction.annotation.Propagation; @@ -33,77 +29,78 @@ @Transactional public class CommentManagerService implements CommentManager { - private EmailSupport emailSupport; - private AdminManager adminManager; - - CommentDao commentDao; - - @Transactional(propagation = Propagation.SUPPORTS) - public void addComment(JobInterface job, CommentDetail comment) { - commentDao.insertComment(job, comment); - ShowEntity show = adminManager.getShowEntity(job.getShowId()); - if (show.commentMail.length > 0) { - emailSupport.reportJobComment(job, comment, show.commentMail); - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public void addComment(HostInterface host, CommentDetail comment) { - commentDao.insertComment(host, comment); - } + private EmailSupport emailSupport; + private AdminManager adminManager; - @Transactional(propagation = Propagation.REQUIRED) - public void deleteComment(String id) { - commentDao.deleteComment(id); - } + CommentDao commentDao; - @Transactional(propagation = Propagation.REQUIRED) - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject) { - return commentDao.deleteCommentByHostUserAndSubject(host, user, subject); - } - - @Transactional(propagation = Propagation.REQUIRED) - public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject) { - return commentDao.getCommentsByHostUserAndSubject(host, user, subject); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void setCommentSubject(String id, String subject) { - commentDao.updateCommentSubject(id, subject); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void setCommentMessage(String id, String message) { - commentDao.updateCommentMessage(id, message); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void saveComment(CommentDetail detail) { - commentDao.updateComment(detail); - } - - public CommentDao getCommentDao() { - return commentDao; - } - - public void setCommentDao(CommentDao commentDao) { - this.commentDao = commentDao; - } - - public EmailSupport getEmailSupport() { - return emailSupport; - } - - public void setEmailSupport(EmailSupport emailSupport) { - this.emailSupport = emailSupport; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; + @Transactional(propagation = Propagation.SUPPORTS) + public void addComment(JobInterface job, CommentDetail comment) { + commentDao.insertComment(job, comment); + ShowEntity show = adminManager.getShowEntity(job.getShowId()); + if (show.commentMail.length > 0) { + emailSupport.reportJobComment(job, comment, show.commentMail); } + } + + @Transactional(propagation = Propagation.REQUIRED) + public void addComment(HostInterface host, CommentDetail comment) { + commentDao.insertComment(host, comment); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void deleteComment(String id) { + commentDao.deleteComment(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, + String subject) { + return commentDao.deleteCommentByHostUserAndSubject(host, user, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject) { + return 
commentDao.getCommentsByHostUserAndSubject(host, user, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void setCommentSubject(String id, String subject) { + commentDao.updateCommentSubject(id, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void setCommentMessage(String id, String message) { + commentDao.updateCommentMessage(id, message); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void saveComment(CommentDetail detail) { + commentDao.updateComment(detail); + } + + public CommentDao getCommentDao() { + return commentDao; + } + + public void setCommentDao(CommentDao commentDao) { + this.commentDao = commentDao; + } + + public EmailSupport getEmailSupport() { + return emailSupport; + } + + public void setEmailSupport(EmailSupport emailSupport) { + this.emailSupport = emailSupport; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java index 061e31bea..fccb204af 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -31,184 +27,177 @@ public interface DepartmentManager { - /** - * Creates a new render point configurtion. A render point configuration - * maps an OpenCue department to a Track-It task and priorizes shots based - * frame and final date info. - * - * @param renderPoint - */ - public void createDepartmentConfig(PointDetail renderPoint); - - /** - * Creates a new render point configurtion. A render point configuration - * maps an OpenCue department to a Track-It task and priorizes shots based - * frame and final date info. - * - * @param renderPoint - */ - public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept); - - /** - * Returns true if a render point configuration already exists - * for the specified show and department. 
- * - * @param show - * @param dept - * @return - */ - public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept); - - /** - * Creates a new task. A task is for setting minimum procs - * by shot and department - * - * @param t - */ - public void createTask(TaskEntity t); - - /** - * Removes the specified task - * - * @param t - */ - public void removeTask(TaskInterface t); - - /** - * Returns task details - * - * @param id - * @return - */ - public TaskEntity getTaskDetail(String id); - - /** - * Sets the minimum core value for the specified task. If the task is managed - * then the cores will be adjusted, leaving the original min cores value in tact. - * If the task is not managed then the min cores value is altered directly. - * - * @param t - * @param value - */ - public void setMinCores(TaskInterface t, int coreUnits); - - /** - * Sets the minimum core value for the specified task. If the task is managed - * then the cores will be adjusted, leaving the original min cores value in tact. - * If the task is not managed then the min cores value is altered directly. - * - * @param t - * @param value - */ - public void clearTaskAdjustments(PointInterface rp); - - /** - * Enables TI integration - * - * @param rp - * @param tiTask - * @param cores - */ - public void enableTiManaged(PointInterface rp, String tiTask, int coreUnits); - - /** - * Disables Track-It management - * - * @param rp - */ - public void disableTiManaged(PointInterface rp); - - /** - * Updates TI Managed tasks and recalculates all of the min core values - * - * @param rp - */ - public void updateManagedTasks(PointInterface rp); - - /** - * Set the number of cores to normalize the proc point shots with. - * - * @param cdept - * @param cores - */ - public void setManagedCores(PointInterface cdept, int coreUnits); - - /** - * Returns a department configuration detail object from its id. - * - * @param id - * @return - */ - PointDetail getDepartmentConfigDetail(String id); - - /** - * Returns a department configuration detail object - * - * @param show - * @param dept - * @return - */ - PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept); - - /** - * Returns a list of all managed point configurations. Managed point - * configurations get priortities from an outside source, such as track it. - * - * @return a list of point configurations being managed by Track-It - */ - List getManagedPointConfs(); - - /** - * Clears all existing tasks for specified department - * - * @param cdept - */ - public void clearTasks(PointInterface cdept); - - /** - * Clears all existing tasks for specified show and department - * @param show - * @param dept - */ - public void clearTasks(ShowInterface show, DepartmentInterface dept); - - /** - * Updates the min proc value of all jobs that fall within - * the specified task. - * - * @param TaskDetail the task to sync with - */ - void syncJobsWithTask(TaskEntity t); - - /** - * Updates the min proc value of all jobs that fall within - * the specified task. - * - * @param TaskDetail the task to sync with - */ - void syncJobsWithTask(DepartmentInterface d, String shot); - - /** - * Updates the min proc value of all jobs that fall within - * the specified task. - * - * @param TaskDetail the task to sync with - */ - void syncJobsWithTask(JobInterface job); - - /** - * Returns true of the job is managed by a department manager. 
- * - * @param j - */ - boolean isManaged(JobInterface j); - - /** - * - * @param t - */ - void clearTaskAdjustment(TaskInterface t); - + /** + * Creates a new render point configurtion. A render point configuration maps an OpenCue + * department to a Track-It task and priorizes shots based frame and final date info. + * + * @param renderPoint + */ + public void createDepartmentConfig(PointDetail renderPoint); + + /** + * Creates a new render point configurtion. A render point configuration maps an OpenCue + * department to a Track-It task and priorizes shots based frame and final date info. + * + * @param renderPoint + */ + public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept); + + /** + * Returns true if a render point configuration already exists for the specified show and + * department. + * + * @param show + * @param dept + * @return + */ + public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept); + + /** + * Creates a new task. A task is for setting minimum procs by shot and department + * + * @param t + */ + public void createTask(TaskEntity t); + + /** + * Removes the specified task + * + * @param t + */ + public void removeTask(TaskInterface t); + + /** + * Returns task details + * + * @param id + * @return + */ + public TaskEntity getTaskDetail(String id); + + /** + * Sets the minimum core value for the specified task. If the task is managed then the cores will + * be adjusted, leaving the original min cores value in tact. If the task is not managed then the + * min cores value is altered directly. + * + * @param t + * @param value + */ + public void setMinCores(TaskInterface t, int coreUnits); + + /** + * Sets the minimum core value for the specified task. If the task is managed then the cores will + * be adjusted, leaving the original min cores value in tact. If the task is not managed then the + * min cores value is altered directly. + * + * @param t + * @param value + */ + public void clearTaskAdjustments(PointInterface rp); + + /** + * Enables TI integration + * + * @param rp + * @param tiTask + * @param cores + */ + public void enableTiManaged(PointInterface rp, String tiTask, int coreUnits); + + /** + * Disables Track-It management + * + * @param rp + */ + public void disableTiManaged(PointInterface rp); + + /** + * Updates TI Managed tasks and recalculates all of the min core values + * + * @param rp + */ + public void updateManagedTasks(PointInterface rp); + + /** + * Set the number of cores to normalize the proc point shots with. + * + * @param cdept + * @param cores + */ + public void setManagedCores(PointInterface cdept, int coreUnits); + + /** + * Returns a department configuration detail object from its id. + * + * @param id + * @return + */ + PointDetail getDepartmentConfigDetail(String id); + + /** + * Returns a department configuration detail object + * + * @param show + * @param dept + * @return + */ + PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept); + + /** + * Returns a list of all managed point configurations. Managed point configurations get + * priortities from an outside source, such as track it. 
+ * + * @return a list of point configurations being managed by Track-It + */ + List getManagedPointConfs(); + + /** + * Clears all existing tasks for specified department + * + * @param cdept + */ + public void clearTasks(PointInterface cdept); + + /** + * Clears all existing tasks for specified show and department + * + * @param show + * @param dept + */ + public void clearTasks(ShowInterface show, DepartmentInterface dept); + + /** + * Updates the min proc value of all jobs that fall within the specified task. + * + * @param TaskDetail the task to sync with + */ + void syncJobsWithTask(TaskEntity t); + + /** + * Updates the min proc value of all jobs that fall within the specified task. + * + * @param TaskDetail the task to sync with + */ + void syncJobsWithTask(DepartmentInterface d, String shot); + + /** + * Updates the min proc value of all jobs that fall within the specified task. + * + * @param TaskDetail the task to sync with + */ + void syncJobsWithTask(JobInterface job); + + /** + * Returns true of the job is managed by a department manager. + * + * @param j + */ + boolean isManaged(JobInterface j); + + /** + * + * @param t + */ + void clearTaskAdjustment(TaskInterface t); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java index 87d1ea158..5b916ac5e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
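// Sketch of the render-point workflow described by the DepartmentManager
// interface above: make sure a show/department config exists, add a task and
// pin its minimum cores. Core values follow the 100-units-per-core convention
// implied by syncJobsWithTask below; TaskEntity implementing TaskInterface is
// an assumption.
import com.imageworks.spcue.DepartmentInterface;
import com.imageworks.spcue.ShowInterface;
import com.imageworks.spcue.TaskEntity;

class DepartmentSetupSketch {
  private DepartmentManager departmentManager;

  void reserveCores(ShowInterface show, DepartmentInterface dept, TaskEntity task) {
    if (!departmentManager.departmentConfigExists(show, dept)) {
      departmentManager.createDepartmentConfig(show, dept);
    }
    departmentManager.createTask(task);

    // 400 core units == a 4-core minimum. For Track-It managed configs this is
    // applied as an adjustment rather than overwriting the stored minimum.
    departmentManager.setMinCores(task, 400);
  }
}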
*/ - package com.imageworks.spcue.service; import java.util.HashMap; @@ -43,250 +40,243 @@ import com.imageworks.spcue.dao.TaskDao; import com.imageworks.spcue.util.CueUtil; - @Transactional public class DepartmentManagerService implements DepartmentManager { - @Autowired - private Environment env; - - private PointDao pointDao; - private TaskDao taskDao; - private ShowDao showDao; - private JobDao jobDao; - - @Override - public void createDepartmentConfig(PointDetail renderPoint) { - pointDao.insertPointConf(renderPoint); - } - - @Override - public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept) { - return pointDao.pointConfExists(show, dept); - } - - @Override - public void createTask(TaskEntity t) { - taskDao.insertTask(t); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public TaskEntity getTaskDetail(String id) { - return taskDao.getTaskDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public PointDetail getDepartmentConfigDetail(String id) { - return pointDao.getPointConfDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept) { - return pointDao.getPointConfigDetail(show, dept); - } - - @Override - public void removeTask(TaskInterface t) { - taskDao.deleteTask(t); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void setMinCores(TaskInterface t, int coreUnits) { - if (taskDao.isManaged(t)) { - taskDao.adjustTaskMinCores(t, coreUnits); - } else { - taskDao.updateTaskMinCores(t, coreUnits); - } - this.syncJobsWithTask(taskDao.getTaskDetail(t.getTaskId())); + @Autowired + private Environment env; + + private PointDao pointDao; + private TaskDao taskDao; + private ShowDao showDao; + private JobDao jobDao; + + @Override + public void createDepartmentConfig(PointDetail renderPoint) { + pointDao.insertPointConf(renderPoint); + } + + @Override + public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept) { + return pointDao.pointConfExists(show, dept); + } + + @Override + public void createTask(TaskEntity t) { + taskDao.insertTask(t); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public TaskEntity getTaskDetail(String id) { + return taskDao.getTaskDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public PointDetail getDepartmentConfigDetail(String id) { + return pointDao.getPointConfDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept) { + return pointDao.getPointConfigDetail(show, dept); + } + + @Override + public void removeTask(TaskInterface t) { + taskDao.deleteTask(t); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void setMinCores(TaskInterface t, int coreUnits) { + if (taskDao.isManaged(t)) { + taskDao.adjustTaskMinCores(t, coreUnits); + } else { + taskDao.updateTaskMinCores(t, coreUnits); } - - @Override - public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept) { - return pointDao.insertPointConf(show, dept); - } - - @Override - public void clearTasks(PointInterface cdept) { - taskDao.deleteTasks(cdept); - } - - @Override - public void clearTasks(ShowInterface show, 
DepartmentInterface dept) { - taskDao.deleteTasks(show, dept); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void clearTaskAdjustment(TaskInterface t) { - taskDao.clearTaskAdjustment(t); + this.syncJobsWithTask(taskDao.getTaskDetail(t.getTaskId())); + } + + @Override + public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept) { + return pointDao.insertPointConf(show, dept); + } + + @Override + public void clearTasks(PointInterface cdept) { + taskDao.deleteTasks(cdept); + } + + @Override + public void clearTasks(ShowInterface show, DepartmentInterface dept) { + taskDao.deleteTasks(show, dept); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void clearTaskAdjustment(TaskInterface t) { + taskDao.clearTaskAdjustment(t); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void disableTiManaged(PointInterface cdept) { + pointDao.updateDisableManaged(cdept); + clearTasks(cdept); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void enableTiManaged(PointInterface p, String tiTask, int cores) { + pointDao.updateEnableManaged(p, tiTask, cores); + updateManagedTasks(p); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void setManagedCores(PointInterface p, int cores) { + pointDao.updateManagedCores(p, cores); + if (pointDao.isManaged(p, p)) { + updateManagedTasks(p); } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void disableTiManaged(PointInterface cdept) { - pointDao.updateDisableManaged(cdept); - clearTasks(cdept); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void enableTiManaged(PointInterface p, String tiTask, int cores) { - pointDao.updateEnableManaged(p, tiTask, cores); - updateManagedTasks(p); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void setManagedCores(PointInterface p, int cores) { - pointDao.updateManagedCores(p, cores); - if (pointDao.isManaged(p, p)) { - updateManagedTasks(p); - } - } - - @Override - public void clearTaskAdjustments(PointInterface cdept) { - taskDao.clearTaskAdjustments(cdept); + } + + @Override + public void clearTaskAdjustments(PointInterface cdept) { + taskDao.clearTaskAdjustments(cdept); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getManagedPointConfs() { + return pointDao.getManagedPointConfs(); + } + + /** + * Any task with one of these as the production status is considered in progress. 
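// Minimal sketch of the Track-It management lifecycle implemented above:
// enabling stores the task name and core budget and recalculates the managed
// tasks right away, disabling clears the tasks it created. "LAYOUT" is a
// made-up Track-It task name used only for illustration.
import com.imageworks.spcue.PointInterface;

class TiManagementSketch {
  private DepartmentManager departmentManager;

  void toggleTrackItManagement(PointInterface pointConf, boolean enable) {
    if (enable) {
      // 1000 core units == a 10-core budget.
      departmentManager.enableTiManaged(pointConf, "LAYOUT", 1000);
    } else {
      departmentManager.disableTiManaged(pointConf);
    }
  }
}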
+ */ + private static final Set IN_PROGRESS_TASK_STATUS = new HashSet(); + static { + IN_PROGRESS_TASK_STATUS + .addAll(java.util.Arrays.asList(new String[] {"I/P", "Kicked To", "CBB", "Blocked"})); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void updateManagedTasks(PointInterface pd) {} + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void syncJobsWithTask(TaskEntity t) { + + List jobs = jobDao.getJobs(t); + if (jobs.size() == 0) { + return; } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getManagedPointConfs() { - return pointDao.getManagedPointConfs(); + if (jobs.size() == 1) { + jobDao.updateMinCores(jobs.get(0), t.minCoreUnits); + return; } + int core_units_per_job = t.minCoreUnits / (jobs.size() * 100); + int core_units_left_over = (t.minCoreUnits % (jobs.size() * 100) / 100); - /** - * Any task with one of these as the production status is - * considered in progress. + /* + * Calculate a base for each job */ - private static final Set IN_PROGRESS_TASK_STATUS = new HashSet(); - static { - IN_PROGRESS_TASK_STATUS.addAll(java.util.Arrays.asList( - new String[] {"I/P","Kicked To","CBB","Blocked"})); + Map minCores = new HashMap(jobs.size()); + int core_units_unalloc = 0; + + for (JobInterface j : jobs) { + FrameStateTotals totals = jobDao.getFrameStateTotals(j); + if (totals.waiting < core_units_per_job) { + core_units_unalloc = core_units_unalloc + (core_units_per_job - totals.waiting); + minCores.put(j, new Integer[] {totals.waiting, totals.waiting}); + } else { + minCores.put(j, new Integer[] {core_units_per_job, totals.waiting}); + } } - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void updateManagedTasks(PointInterface pd) { - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void syncJobsWithTask(TaskEntity t) { - - List jobs = jobDao.getJobs(t); - if (jobs.size() == 0) { - return; - } - - if (jobs.size() == 1) { - jobDao.updateMinCores(jobs.get(0), t.minCoreUnits); - return; - } - - int core_units_per_job = t.minCoreUnits / (jobs.size() * 100); - int core_units_left_over = (t.minCoreUnits % (jobs.size() * 100) / 100); - - /* - * Calculate a base for each job - */ - Map minCores = new HashMap(jobs.size()); - int core_units_unalloc = 0; - - for (JobInterface j: jobs) { - FrameStateTotals totals = jobDao.getFrameStateTotals(j); - if (totals.waiting < core_units_per_job) { - core_units_unalloc= core_units_unalloc - + (core_units_per_job - totals.waiting); - minCores.put(j, new Integer[] {totals.waiting, totals.waiting}); - } - else { - minCores.put(j, new Integer[] {core_units_per_job, totals.waiting}); - } - } - - /* - * Apply any left over core units. If the job doesn't have - * waiting frames to apply them to then don't do anything. - */ - core_units_left_over = core_units_left_over + core_units_unalloc; - while (core_units_left_over > 0) { - boolean applied = false; - for (JobInterface j: jobs) { - if (core_units_left_over < 1) { - break; - } - if (minCores.get(j)[1] - minCores.get(j)[0] > 0) { - minCores.get(j)[0] = minCores.get(j)[0] + 1; - core_units_left_over = core_units_left_over - 1; - applied = true; - } - } - if (!applied) { - break; - } + /* + * Apply any left over core units. If the job doesn't have waiting frames to apply them to then + * don't do anything. 
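// Self-contained restatement of the split syncJobsWithTask performs, with
// made-up numbers: a 1000-core-unit task budget (10 cores) spread over three
// jobs with 10, 2 and 5 waiting frames. No job gets more cores than it has
// waiting frames; the remainder plus any capped shortfall is handed out one
// core at a time, and the result is written back in core units (cores * 100).
public class CoreSplitSketch {
  public static void main(String[] args) {
    int minCoreUnits = 1000;
    int[] waiting = {10, 2, 5};
    int jobs = waiting.length;

    int perJob = minCoreUnits / (jobs * 100);         // 3 cores per job
    int leftOver = minCoreUnits % (jobs * 100) / 100; // 1 spare core

    int[] base = new int[jobs];
    for (int i = 0; i < jobs; i++) {
      base[i] = Math.min(perJob, waiting[i]);
      leftOver += perJob - base[i];                   // shortfall goes back in the pool
    }

    boolean applied = true;
    while (leftOver > 0 && applied) {
      applied = false;
      for (int i = 0; i < jobs && leftOver > 0; i++) {
        if (waiting[i] - base[i] > 0) {               // only jobs with headroom
          base[i]++;
          leftOver--;
          applied = true;
        }
      }
    }

    for (int i = 0; i < jobs; i++) {
      System.out.println("job " + i + " -> " + (base[i] * 100) + " core units");
    }
    // Prints 400, 200 and 400 core units, i.e. the full 10-core budget.
  }
}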
+ */ + core_units_left_over = core_units_left_over + core_units_unalloc; + while (core_units_left_over > 0) { + boolean applied = false; + for (JobInterface j : jobs) { + if (core_units_left_over < 1) { + break; } - - /* - * Update the DB - */ - for (JobInterface j: jobs) { - jobDao.updateMinCores(j, minCores.get(j)[0] * 100); + if (minCores.get(j)[1] - minCores.get(j)[0] > 0) { + minCores.get(j)[0] = minCores.get(j)[0] + 1; + core_units_left_over = core_units_left_over - 1; + applied = true; } + } + if (!applied) { + break; + } } - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void syncJobsWithTask(DepartmentInterface d, String shot) { - syncJobsWithTask(taskDao.getTaskDetail(d, shot)); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void syncJobsWithTask(JobInterface job) { - syncJobsWithTask(taskDao.getTaskDetail(job)); - } - - @Override - public boolean isManaged(JobInterface j) { - return taskDao.isManaged(j); - } - - public TaskDao getTaskDao() { - return taskDao; - } - - public void setTaskDao(TaskDao taskDao) { - this.taskDao = taskDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public PointDao getPointDao() { - return pointDao; - } - - public void setPointDao(PointDao pointDao) { - this.pointDao = pointDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; + /* + * Update the DB + */ + for (JobInterface j : jobs) { + jobDao.updateMinCores(j, minCores.get(j)[0] * 100); } + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void syncJobsWithTask(DepartmentInterface d, String shot) { + syncJobsWithTask(taskDao.getTaskDetail(d, shot)); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void syncJobsWithTask(JobInterface job) { + syncJobsWithTask(taskDao.getTaskDetail(job)); + } + + @Override + public boolean isManaged(JobInterface j) { + return taskDao.isManaged(j); + } + + public TaskDao getTaskDao() { + return taskDao; + } + + public void setTaskDao(TaskDao taskDao) { + this.taskDao = taskDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public PointDao getPointDao() { + return pointDao; + } + + public void setPointDao(PointDao pointDao) { + this.pointDao = pointDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java index a78311225..54debfbc3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -42,168 +38,164 @@ public interface DependManager { - /** - * This just calls createDepend(Dependency depend) with - * the result of buildDepend(BuildableDependency depend). - * This is mainly for convenience. - * - * @param BuildableDependency depend - */ - void createDepend(BuildableDependency depend); - - List getWhatDependsOn(JobInterface job); - List getWhatDependsOn(JobInterface job, DependTarget target); - - /** - * Return any dependencies that reference the given frame - * as the frame to depend on. - * - * @param frame - * @param active To limit results to only active depends, set - * this to true. To limit results to only - * inactive depends, set this to false. - * @return - */ - List getWhatDependsOn(FrameInterface frame, boolean active); - List getWhatDependsOn(FrameInterface frame); - List getWhatDependsOn(LayerInterface layer); - - /** - * Return any dependencies that reference the given layer - * as the layer to depend on. - * - * @param layer - * @param active To limit results to only active depends, set - * this to true. To limit results to only - * inactive depends, set this to false. - * @return - */ - List getWhatDependsOn(LayerInterface layer, boolean active); - - LightweightDependency getDepend(String id); - void satisfyDepend(LightweightDependency depend); - - /** - * Returns a list of depends where the specified job is the depender. Passing a - * depend target will limit the results to either internal or external. This - * method returns active depends only. - * - * @param Job - * @param DependTarget - * @return List - */ - public List getWhatThisDependsOn(JobInterface job, DependTarget target); - - /** - * Returns a list of depends the layer depends on. Passing in a depend - * target will limit the results to either internal, external or both. - * This method returns active depends only. - * - * @param Layer - * @return List - */ - public List getWhatThisDependsOn(LayerInterface layer, DependTarget target); - - /** - * Returns a list of depends the frame depends on. Passing in a depend - * target will limit the results to either internal, external, or both.This - * method returns active depends only. - * - * @param Frame - * @return List - */ - public List getWhatThisDependsOn(FrameInterface frame, DependTarget target); - - /** - * Create a JobOnJob depend. - * - * @param depend - */ - void createDepend(JobOnJob depend); - - /** - * Create a JobOnLayer depend - * - * @param depend - */ - void createDepend(JobOnLayer depend); - - /** - * Create a JobOnFrame depend - * - * @param depend - */ - void createDepend(JobOnFrame depend); - - /** - * Create a LayerOnJob depend. 
- * - * @param depend - */ - void createDepend(LayerOnJob depend); - - /** - * Create a LayerOnLayer depend. - * - * @param depend - */ - void createDepend(LayerOnLayer depend); - - /** - * Create a LayerOnFrame depend. - * - * @param depend - */ - void createDepend(LayerOnFrame depend); - - /** - * Create a FrameOnJob depend. - * - * @param depend - */ - void createDepend(FrameOnJob depend); - - /** - * Create a FrameOnLayer depend. - * - * @param depend - */ - void createDepend(FrameOnLayer depend); - - /** - * Create a FrameOnFrame depend. - * - * @param depend - */ - void createDepend(FrameOnFrame depend); - - /** - * Create a FrameByFrame depend. - * - * @param depend - */ - void createDepend(FrameByFrame depend); - - /** - * Creates a previous frame dependency. - * - * @param depend - */ - void createDepend(PreviousFrame depend); - - /** - * Unsatisfy the specified dependency. Currently only works - * for FrameOnFrame depends. - * - * @param depend - */ - void unsatisfyDepend(LightweightDependency depend); - - /** - * Create a depend of type LayerOnSimFrame - * - * @param depend - */ - void createDepend(LayerOnSimFrame depend); + /** + * This just calls createDepend(Dependency depend) with the result of + * buildDepend(BuildableDependency depend). This is mainly for convenience. + * + * @param BuildableDependency depend + */ + void createDepend(BuildableDependency depend); + + List getWhatDependsOn(JobInterface job); + + List getWhatDependsOn(JobInterface job, DependTarget target); + + /** + * Return any dependencies that reference the given frame as the frame to depend on. + * + * @param frame + * @param active To limit results to only active depends, set this to true. To limit results to + * only inactive depends, set this to false. + * @return + */ + List getWhatDependsOn(FrameInterface frame, boolean active); + + List getWhatDependsOn(FrameInterface frame); + + List getWhatDependsOn(LayerInterface layer); + + /** + * Return any dependencies that reference the given layer as the layer to depend on. + * + * @param layer + * @param active To limit results to only active depends, set this to true. To limit results to + * only inactive depends, set this to false. + * @return + */ + List getWhatDependsOn(LayerInterface layer, boolean active); + + LightweightDependency getDepend(String id); + + void satisfyDepend(LightweightDependency depend); + + /** + * Returns a list of depends where the specified job is the depender. Passing a depend target will + * limit the results to either internal or external. This method returns active depends only. + * + * @param Job + * @param DependTarget + * @return List + */ + public List getWhatThisDependsOn(JobInterface job, DependTarget target); + + /** + * Returns a list of depends the layer depends on. Passing in a depend target will limit the + * results to either internal, external or both. This method returns active depends only. + * + * @param Layer + * @return List + */ + public List getWhatThisDependsOn(LayerInterface layer, + DependTarget target); + + /** + * Returns a list of depends the frame depends on. Passing in a depend target will limit the + * results to either internal, external, or both.This method returns active depends only. + * + * @param Frame + * @return List + */ + public List getWhatThisDependsOn(FrameInterface frame, + DependTarget target); + + /** + * Create a JobOnJob depend. 
+ * + * @param depend + */ + void createDepend(JobOnJob depend); + + /** + * Create a JobOnLayer depend + * + * @param depend + */ + void createDepend(JobOnLayer depend); + + /** + * Create a JobOnFrame depend + * + * @param depend + */ + void createDepend(JobOnFrame depend); + + /** + * Create a LayerOnJob depend. + * + * @param depend + */ + void createDepend(LayerOnJob depend); + + /** + * Create a LayerOnLayer depend. + * + * @param depend + */ + void createDepend(LayerOnLayer depend); + + /** + * Create a LayerOnFrame depend. + * + * @param depend + */ + void createDepend(LayerOnFrame depend); + + /** + * Create a FrameOnJob depend. + * + * @param depend + */ + void createDepend(FrameOnJob depend); + + /** + * Create a FrameOnLayer depend. + * + * @param depend + */ + void createDepend(FrameOnLayer depend); + + /** + * Create a FrameOnFrame depend. + * + * @param depend + */ + void createDepend(FrameOnFrame depend); + + /** + * Create a FrameByFrame depend. + * + * @param depend + */ + void createDepend(FrameByFrame depend); + + /** + * Creates a previous frame dependency. + * + * @param depend + */ + void createDepend(PreviousFrame depend); + + /** + * Unsatisfy the specified dependency. Currently only works for FrameOnFrame depends. + * + * @param depend + */ + void unsatisfyDepend(LightweightDependency depend); + + /** + * Create a depend of type LayerOnSimFrame + * + * @param depend + */ + void createDepend(LayerOnSimFrame depend); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java index 2a82c099d..128571ce0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
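// Sketch: make one layer wait on another, then release it. The LayerOnLayer
// constructor shape and getId() call mirror the service code below; the depend
// type imports are omitted because their package is not visible in this hunk.
import com.imageworks.spcue.LayerInterface;
import com.imageworks.spcue.LightweightDependency;

class DependSketch {
  private DependManager dependManager;

  void holdLayer(LayerInterface waitingLayer, LayerInterface upstreamLayer) {
    // Creating the depend bumps the depend count on the waiting frames.
    LayerOnLayer depend = new LayerOnLayer(waitingLayer, upstreamLayer);
    dependManager.createDepend(depend);

    // Once the upstream layer has finished, satisfy the depend to release it.
    LightweightDependency lwd = dependManager.getDepend(depend.getId());
    dependManager.satisfyDepend(lwd);
  }
}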
*/ - - package com.imageworks.spcue.service; import java.util.HashSet; @@ -64,610 +60,548 @@ @Transactional public class DependManagerService implements DependManager { - private static final Logger logger = LogManager.getLogger(DependManagerService.class); - - private DependDao dependDao; - private JobDao jobDao; - private LayerDao layerDao; - private FrameDao frameDao; - private FrameSearchFactory frameSearchFactory; - - /** Job Depends **/ - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(JobOnJob depend) { - if (jobDao.isJobComplete(depend.getDependOnJob())) { - throw new DependException( - "The job you are depending on is already complete."); - } - dependDao.insertDepend(depend); - updateDependCount(depend.getDependErJob()); - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(JobOnLayer depend) { - if (layerDao.isLayerComplete(depend.getDependOnLayer())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErJob()); - } - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(JobOnFrame depend) { - if (frameDao.isFrameComplete(depend.getDependOnFrame())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErJob()); - } - } - - /** Layer Depends **/ - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(LayerOnJob depend) { - if (jobDao.isJobComplete(depend.getDependOnJob())) { - throw new DependException( - "The job you are depending on is already complete."); - } - dependDao.insertDepend(depend); - updateDependCount(depend.getDependErLayer()); - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(LayerOnLayer depend) { - if (layerDao.isLayerComplete(depend.getDependOnLayer())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErLayer()); - } - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(LayerOnFrame depend) { - if (frameDao.isFrameComplete(depend.getDependOnFrame())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErLayer()); - } - } - - /** Frame Depends **/ - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(FrameOnJob depend) { - if (jobDao.isJobComplete(depend.getDependOnJob())) { - throw new DependException( - "The job you are depending on is already complete."); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCounts(depend.getDependErFrame()); - } - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(FrameOnLayer depend) { - if (layerDao.isLayerComplete(depend.getDependOnLayer())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCounts(depend.getDependErFrame()); - } - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(FrameOnFrame depend) { - if (frameDao.isFrameComplete(depend.getDependOnFrame())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCounts(depend.getDependErFrame()); - } - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - 
public void createDepend(LayerOnSimFrame depend) { - - /* - * Need the frame range to make all the dependencies. - */ - LayerDetail dependErLayer = layerDao.getLayerDetail( - depend.getDependErLayer().getLayerId()); - - /* - * A normalized list of frames. - */ - List dependErFrameSet = CueUtil.normalizeFrameRange( - dependErLayer.range, dependErLayer.chunkSize); - - int dependErFrameSetSize = dependErFrameSet.size(); - for (int idx = 0; idx < dependErFrameSetSize; idx = idx +1) { - /* - * Lookup the frame we need out of our depend-er layer. - */ - int frameNum = dependErFrameSet.get(idx); - - FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, frameNum); - FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, - depend.getDependOnFrame()); - createDepend(fofDepend); - } - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(FrameByFrame depend) { - - /* - * Obtain the full layer record so we have access - * to the frame range and other properties. - */ - LayerDetail dependErLayer = layerDao.getLayerDetail( - depend.getDependErLayer().getLayerId()); - - LayerDetail dependOnLayer = layerDao.getLayerDetail( - depend.getDependOnLayer().getLayerId()); - - /* - * Do not create external dependencies on tile layers. - */ - if (depend.getTarget().equals(DependTarget.EXTERNAL) - && dependOnLayer.getName().contains("_tile_")) { - return; - } - - /* - * Please note. The job frame ranges are not normalized in - * any way, there is going to be duplicates. (why a "Set" - * would allow dups is unknown). Anyways, When iterating - * over these frame sets, you must do so by chunk size and - * ignore duplicate frames. - */ - - List dependErFrameSet = CueUtil.normalizeFrameRange( - dependErLayer.range, dependErLayer.chunkSize); - - List dependOnFrameSet = CueUtil.normalizeFrameRange( - dependOnLayer.range, dependOnLayer.chunkSize); - - /* - * When a layer is chunked so large it contains only a single frame, - * any FrameByFrame depends to/from that that layer are converted - * to LayerOnLayer depends. - */ - if ((dependOnFrameSet.size() == 1 && dependOnLayer.chunkSize > 1) - || (dependErFrameSet.size() == 1 && dependErLayer.chunkSize > 1)) { - - LayerOnLayer lolDepend = new LayerOnLayer(depend.getDependErLayer(), - depend.getDependOnLayer()); - - createDepend(lolDepend); - depend.setId(lolDepend.getId()); - return; - } - - /* - * Create the parent depends. - */ - try { - dependDao.insertDepend(depend); - } - catch (DataIntegrityViolationException e) { - LightweightDependency originalDep = - dependDao.getDependBySignature(depend.getSignature()); - depend.setId(originalDep.getId()); - if (!depend.isActive()) { - unsatisfyDepend(originalDep); - } - else { - return; - } - } - - int dependErFrameSetSize = dependErFrameSet.size(); - for (int idx = 0; idx < dependErFrameSetSize; idx = idx +1) { - - Set dependOnFrames = new HashSet(dependOnFrameSet.size()); - - int dependErFrameNum = dependErFrameSet.get(idx); - /* The frame always depends on the corresponding frame. */ - int dependOnFrameNum = dependErFrameNum; - - /* - * Finds any additional frames the dependErFrame might need to - * depend on. 
- */ - if (dependOnLayer.chunkSize > dependErLayer.chunkSize) { - dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); - dependOnFrames.add(dependOnFrameNum); - } - else if (dependOnLayer.chunkSize < dependErLayer.chunkSize) { - dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); - dependOnFrames.add(dependOnFrameNum); - - for(int i=0; i <= dependErLayer.chunkSize - dependOnLayer.chunkSize; i++) { - int nextFrameIdx = dependOnFrameSet.indexOf(dependOnFrameNum) + i; - try { - dependOnFrames.add(dependOnFrameSet.get(nextFrameIdx)); - } catch (java.lang.IndexOutOfBoundsException e) { - continue; - } - } - } - else if (!dependErFrameSet.equals(dependOnFrameSet)) { - if (dependOnFrameSet.contains(dependErFrameNum)) { - dependOnFrames.add(dependErFrameNum); - } - else { - continue; - } - } - else { - dependOnFrames.add(dependErFrameNum); - } - - /* - * Now we can finally start adding child dependencies. - */ - try { - FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, - dependErFrameNum); - for (int frameNum: dependOnFrames) { - FrameInterface dependOnFrame = frameDao.findFrame(dependOnLayer, - frameNum); - FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, - dependOnFrame, depend); - - createDepend(fofDepend); - - } - } catch (DataRetrievalFailureException dre) { - logger.warn("failed to create frame by frame depend, " + - "part of frame on frame depend: "+ - depend.getId() + " reason: " + dre); - } - } - } - - @Override - public void createDepend(PreviousFrame depend) { - - /* - * Obtain the full layer record so we have access - * to the frame range and other properties. - */ - LayerDetail dependErLayer = layerDao.getLayerDetail( - depend.getDependErLayer().getLayerId()); - - LayerDetail dependOnLayer = layerDao.getLayerDetail( - depend.getDependOnLayer().getLayerId()); - - FrameSet dependErFrameSet = new FrameSet(dependErLayer.range); - FrameSet dependOnFrameSet = new FrameSet(dependOnLayer.range); - - dependDao.insertDepend(depend); - int dependErFrameSetSize = dependErFrameSet.size(); - for (int idx = 1; idx < dependErFrameSetSize; idx = idx + 1) { - - try { - FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, - dependErFrameSet.get(idx)); - - FrameInterface dependOnFrame = frameDao.findFrame(dependOnLayer, - dependOnFrameSet.get(idx - 1)); - - createDepend(new FrameOnFrame(dependErFrame, - dependOnFrame, depend)); - } catch (DataRetrievalFailureException dre) { - logger.warn("failed to create frame by frame depend, " + - "part of a previous frame depend: " + - depend.getId() + " reason: " + dre); - } - } - } - - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void createDepend(BuildableDependency depend) { - - JobInterface onJob = null; - JobInterface erJob = null; - - try { - onJob = jobDao.findJob(depend.getDependOnJobName()); - erJob = jobDao.findJob(depend.getDependErJobName()); + private static final Logger logger = LogManager.getLogger(DependManagerService.class); + + private DependDao dependDao; + private JobDao jobDao; + private LayerDao layerDao; + private FrameDao frameDao; + private FrameSearchFactory frameSearchFactory; + + /** Job Depends **/ + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(JobOnJob depend) { + if (jobDao.isJobComplete(depend.getDependOnJob())) { + throw new DependException("The job you are depending on is already complete."); + } + dependDao.insertDepend(depend); + updateDependCount(depend.getDependErJob()); + } + + 
@Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(JobOnLayer depend) { + if (layerDao.isLayerComplete(depend.getDependOnLayer())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErJob()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(JobOnFrame depend) { + if (frameDao.isFrameComplete(depend.getDependOnFrame())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErJob()); + } + } + + /** Layer Depends **/ + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnJob depend) { + if (jobDao.isJobComplete(depend.getDependOnJob())) { + throw new DependException("The job you are depending on is already complete."); + } + dependDao.insertDepend(depend); + updateDependCount(depend.getDependErLayer()); + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnLayer depend) { + if (layerDao.isLayerComplete(depend.getDependOnLayer())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErLayer()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnFrame depend) { + if (frameDao.isFrameComplete(depend.getDependOnFrame())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErLayer()); + } + } + + /** Frame Depends **/ + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameOnJob depend) { + if (jobDao.isJobComplete(depend.getDependOnJob())) { + throw new DependException("The job you are depending on is already complete."); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCounts(depend.getDependErFrame()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameOnLayer depend) { + if (layerDao.isLayerComplete(depend.getDependOnLayer())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCounts(depend.getDependErFrame()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameOnFrame depend) { + if (frameDao.isFrameComplete(depend.getDependOnFrame())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCounts(depend.getDependErFrame()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnSimFrame depend) { + + /* + * Need the frame range to make all the dependencies. + */ + LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); + + /* + * A normalized list of frames. + */ + List dependErFrameSet = + CueUtil.normalizeFrameRange(dependErLayer.range, dependErLayer.chunkSize); + + int dependErFrameSetSize = dependErFrameSet.size(); + for (int idx = 0; idx < dependErFrameSetSize; idx = idx + 1) { + /* + * Lookup the frame we need out of our depend-er layer. 
+ */ + int frameNum = dependErFrameSet.get(idx); + + FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, frameNum); + FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, depend.getDependOnFrame()); + createDepend(fofDepend); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameByFrame depend) { + + /* + * Obtain the full layer record so we have access to the frame range and other properties. + */ + LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); + + LayerDetail dependOnLayer = layerDao.getLayerDetail(depend.getDependOnLayer().getLayerId()); + + /* + * Do not create external dependencies on tile layers. + */ + if (depend.getTarget().equals(DependTarget.EXTERNAL) + && dependOnLayer.getName().contains("_tile_")) { + return; + } + + /* + * Please note. The job frame ranges are not normalized in any way, there is going to be + * duplicates. (why a "Set" would allow dups is unknown). Anyways, When iterating over these + * frame sets, you must do so by chunk size and ignore duplicate frames. + */ + + List dependErFrameSet = + CueUtil.normalizeFrameRange(dependErLayer.range, dependErLayer.chunkSize); + + List dependOnFrameSet = + CueUtil.normalizeFrameRange(dependOnLayer.range, dependOnLayer.chunkSize); + + /* + * When a layer is chunked so large it contains only a single frame, any FrameByFrame depends + * to/from that that layer are converted to LayerOnLayer depends. + */ + if ((dependOnFrameSet.size() == 1 && dependOnLayer.chunkSize > 1) + || (dependErFrameSet.size() == 1 && dependErLayer.chunkSize > 1)) { + + LayerOnLayer lolDepend = + new LayerOnLayer(depend.getDependErLayer(), depend.getDependOnLayer()); + + createDepend(lolDepend); + depend.setId(lolDepend.getId()); + return; + } + + /* + * Create the parent depends. + */ + try { + dependDao.insertDepend(depend); + } catch (DataIntegrityViolationException e) { + LightweightDependency originalDep = dependDao.getDependBySignature(depend.getSignature()); + depend.setId(originalDep.getId()); + if (!depend.isActive()) { + unsatisfyDepend(originalDep); + } else { + return; + } + } + + int dependErFrameSetSize = dependErFrameSet.size(); + for (int idx = 0; idx < dependErFrameSetSize; idx = idx + 1) { + + Set dependOnFrames = new HashSet(dependOnFrameSet.size()); + + int dependErFrameNum = dependErFrameSet.get(idx); + /* The frame always depends on the corresponding frame. */ + int dependOnFrameNum = dependErFrameNum; + + /* + * Finds any additional frames the dependErFrame might need to depend on. 
+ */ + if (dependOnLayer.chunkSize > dependErLayer.chunkSize) { + dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); + dependOnFrames.add(dependOnFrameNum); + } else if (dependOnLayer.chunkSize < dependErLayer.chunkSize) { + dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); + dependOnFrames.add(dependOnFrameNum); + + for (int i = 0; i <= dependErLayer.chunkSize - dependOnLayer.chunkSize; i++) { + int nextFrameIdx = dependOnFrameSet.indexOf(dependOnFrameNum) + i; + try { + dependOnFrames.add(dependOnFrameSet.get(nextFrameIdx)); + } catch (java.lang.IndexOutOfBoundsException e) { + continue; + } } - catch (Exception e) { - throw new DependencyManagerException("failed to setup new dependency: " + - depend.getType().toString() + ", was unable to find job info for " + - depend.getDependOnJobName() + " or " + depend.getDependErJobName() + - "," + e); + } else if (!dependErFrameSet.equals(dependOnFrameSet)) { + if (dependOnFrameSet.contains(dependErFrameNum)) { + dependOnFrames.add(dependErFrameNum); + } else { + continue; } + } else { + dependOnFrames.add(dependErFrameNum); + } - switch (depend.getType()) { - - case FRAME_BY_FRAME: - createDepend(new FrameByFrame( - layerDao.findLayer(erJob, - depend.getDependErLayerName()), - layerDao.findLayer(onJob, - depend.getDependOnLayerName())) - ); - break; - - case JOB_ON_JOB: - createDepend(new JobOnJob(erJob, onJob)); - break; - - case JOB_ON_LAYER: - createDepend(new JobOnLayer(erJob, - layerDao.findLayer(onJob, - depend.getDependOnLayerName()))); - break; - - case JOB_ON_FRAME: - createDepend(new JobOnFrame(erJob, - frameDao.findFrame(onJob, depend - .getDependOnFrameName()))); - break; - - case LAYER_ON_JOB: - createDepend(new LayerOnJob( - layerDao.findLayer(erJob, - depend.getDependErLayerName()), - onJob)); - break; - - case LAYER_ON_LAYER: - LayerOnLayer lol = new LayerOnLayer( - layerDao.findLayer(erJob, depend - .getDependErLayerName()), - layerDao.findLayer(onJob, depend - .getDependOnLayerName())); - lol.setAnyFrame(depend.anyFrame); - createDepend(lol); - break; - - case LAYER_ON_FRAME: - createDepend(new LayerOnFrame( - layerDao.findLayer(erJob, - depend.getDependErLayerName()), - frameDao.findFrame(onJob, - depend.getDependOnLayerName()))); - break; - - case FRAME_ON_JOB: - createDepend(new FrameOnJob( - frameDao.findFrame(erJob, depend - .getDependErFrameName()), - onJob)); - break; - - case FRAME_ON_LAYER: - createDepend(new FrameOnLayer( - frameDao.findFrame(erJob, - depend.getDependErFrameName()), - layerDao.findLayer(onJob, - depend.getDependOnLayerName()))); - break; - - case FRAME_ON_FRAME: - createDepend(new FrameOnFrame( - frameDao.findFrame(erJob, depend - .getDependErFrameName()), - frameDao.findFrame(onJob, depend - .getDependOnFrameName()))); - break; - - case PREVIOUS_FRAME: - createDepend(new PreviousFrame( - layerDao.findLayer(erJob, depend - .getDependErLayerName()), - layerDao.findLayer(onJob, depend - .getDependOnLayerName()))); - break; - - case LAYER_ON_SIM_FRAME: - createDepend(new LayerOnSimFrame( - layerDao.findLayer(erJob, - depend.getDependErLayerName()), - frameDao.findFrame(onJob, - depend.getDependOnFrameName()))); - break; - } - } + /* + * Now we can finally start adding child dependencies. 
+ */ + try { + FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, dependErFrameNum); + for (int frameNum : dependOnFrames) { + FrameInterface dependOnFrame = frameDao.findFrame(dependOnLayer, frameNum); + FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, dependOnFrame, depend); - private void updateDependCount(LayerInterface layer) { - FrameSearchInterface r = frameSearchFactory.create(layer); - for (FrameInterface f: frameDao.findFrames(r)) { - updateDependCounts(f); - } - } + createDepend(fofDepend); - private void updateDependCount(JobInterface job) { - FrameSearchInterface r = frameSearchFactory.create(job); - for (FrameInterface f: frameDao.findFrames(r)) { - updateDependCounts(f); } - } + } catch (DataRetrievalFailureException dre) { + logger.warn("failed to create frame by frame depend, " + "part of frame on frame depend: " + + depend.getId() + " reason: " + dre); + } + } + } + + @Override + public void createDepend(PreviousFrame depend) { + + /* + * Obtain the full layer record so we have access to the frame range and other properties. + */ + LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); + + LayerDetail dependOnLayer = layerDao.getLayerDetail(depend.getDependOnLayer().getLayerId()); + + FrameSet dependErFrameSet = new FrameSet(dependErLayer.range); + FrameSet dependOnFrameSet = new FrameSet(dependOnLayer.range); + + dependDao.insertDepend(depend); + int dependErFrameSetSize = dependErFrameSet.size(); + for (int idx = 1; idx < dependErFrameSetSize; idx = idx + 1) { + + try { + FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, dependErFrameSet.get(idx)); - private void updateDependCounts(FrameInterface f) { - dependDao.incrementDependCount(f); - } + FrameInterface dependOnFrame = + frameDao.findFrame(dependOnLayer, dependOnFrameSet.get(idx - 1)); + + createDepend(new FrameOnFrame(dependErFrame, dependOnFrame, depend)); + } catch (DataRetrievalFailureException dre) { + logger.warn("failed to create frame by frame depend, " + "part of a previous frame depend: " + + depend.getId() + " reason: " + dre); + } + } + } - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public LightweightDependency getDepend(String id) { - return dependDao.getDepend(id); - } - - @Override - @Transactional(propagation=Propagation.SUPPORTS) - public void unsatisfyDepend(LightweightDependency depend) { - - // Currently only handles FrameOnFrame and LayerOnLayer. 
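    // A minimal, self-contained sketch of the chunk-normalization idea relied on above: a
    // chunked layer only dispatches one representative frame per chunk, so a frame range is
    // reduced to those frames, and any frame number can be mapped back to the chunk that
    // covers it. This is a simplified, hypothetical stand-in for CueUtil.normalizeFrameRange
    // and CueUtil.findChunk, assuming a plain contiguous start-end range with step 1; the
    // real FrameSet/CueUtil code handles arbitrary frame specs.
    //
    // import java.util.ArrayList;
    // import java.util.List;
    //
    // class ChunkSketch {
    //   /** Keep only the first frame of each chunk in [start, end]. */
    //   static List<Integer> normalize(int start, int end, int chunkSize) {
    //     List<Integer> frames = new ArrayList<>();
    //     for (int f = start; f <= end; f += chunkSize) {
    //       frames.add(f);
    //     }
    //     return frames;
    //   }
    //
    //   /** Map an arbitrary frame to the representative frame of the chunk that covers it. */
    //   static int findChunk(List<Integer> normalized, int frame) {
    //     int chunk = normalized.get(0);
    //     for (int f : normalized) {
    //       if (f <= frame) {
    //         chunk = f;
    //       } else {
    //         break;
    //       }
    //     }
    //     return chunk;
    //   }
    //
    //   public static void main(String[] args) {
    //     List<Integer> frames = normalize(1, 10, 3); // [1, 4, 7, 10]
    //     System.out.println(frames);
    //     System.out.println(findChunk(frames, 8));   // 7, frame 8 is rendered by chunk 7
    //   }
    // }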
- if (dependDao.setActive(depend)) { - - switch(depend.type) { - - case FRAME_ON_FRAME: - FrameInterface frame = frameDao.getFrame(depend.dependErFrameId); - updateDependCounts(frame); - break; - - case LAYER_ON_LAYER: - updateDependCount(layerDao.getLayer(depend.dependErLayerId)); - break; - } - } + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(BuildableDependency depend) { + + JobInterface onJob = null; + JobInterface erJob = null; + + try { + onJob = jobDao.findJob(depend.getDependOnJobName()); + erJob = jobDao.findJob(depend.getDependErJobName()); + } catch (Exception e) { + throw new DependencyManagerException("failed to setup new dependency: " + + depend.getType().toString() + ", was unable to find job info for " + + depend.getDependOnJobName() + " or " + depend.getDependErJobName() + "," + e); } - @Transactional(propagation=Propagation.SUPPORTS) - public void satisfyDepend(LightweightDependency depend) { - /* - * Before setting the depend to in-active, obtain a list - * of frames and decrement the depend count on them. - */ - if (DependType.FRAME_BY_FRAME.equals(depend.type)) { - List children = - dependDao.getChildDepends(depend); - - for (LightweightDependency lwd: children) { - satisfyDepend(lwd); - } - return; - } + switch (depend.getType()) { + + case FRAME_BY_FRAME: + createDepend(new FrameByFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case JOB_ON_JOB: + createDepend(new JobOnJob(erJob, onJob)); + break; - /* - * Only decrement the depend counts if the depend is - * actually set to inactive. - */ - if (dependDao.setInactive(depend)) { - logger.info("satisfied depend: " + depend.getId()); - for (FrameInterface f: frameDao.getDependentFrames(depend)) { - if (!dependDao.decrementDependCount(f)) { - logger.warn("warning, depend count for " + - depend.getId() + "was not decremented " + - "for frame " + f + "because the count is " + - "already 0."); - } - } + case JOB_ON_LAYER: + createDepend( + new JobOnLayer(erJob, layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case JOB_ON_FRAME: + createDepend( + new JobOnFrame(erJob, frameDao.findFrame(onJob, depend.getDependOnFrameName()))); + break; + + case LAYER_ON_JOB: + createDepend( + new LayerOnJob(layerDao.findLayer(erJob, depend.getDependErLayerName()), onJob)); + break; + + case LAYER_ON_LAYER: + LayerOnLayer lol = + new LayerOnLayer(layerDao.findLayer(erJob, depend.getDependErLayerName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName())); + lol.setAnyFrame(depend.anyFrame); + createDepend(lol); + break; + + case LAYER_ON_FRAME: + createDepend(new LayerOnFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + frameDao.findFrame(onJob, depend.getDependOnLayerName()))); + break; + + case FRAME_ON_JOB: + createDepend( + new FrameOnJob(frameDao.findFrame(erJob, depend.getDependErFrameName()), onJob)); + break; + + case FRAME_ON_LAYER: + createDepend(new FrameOnLayer(frameDao.findFrame(erJob, depend.getDependErFrameName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case FRAME_ON_FRAME: + createDepend(new FrameOnFrame(frameDao.findFrame(erJob, depend.getDependErFrameName()), + frameDao.findFrame(onJob, depend.getDependOnFrameName()))); + break; + + case PREVIOUS_FRAME: + createDepend(new PreviousFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + 
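    // A minimal in-memory sketch of the depend-count bookkeeping used by satisfyDepend and
    // unsatisfyDepend above: creating or reactivating a depend increments a counter on every
    // depend-er frame, satisfying it decrements the counter, and the count never drops below
    // zero (the real code logs a warning in that case). The toy map keyed by frame name is an
    // assumption standing in for the DAO-backed depend_count column.
    //
    // import java.util.HashMap;
    // import java.util.Map;
    //
    // class DependCountSketch {
    //   private final Map<String, Integer> dependCounts = new HashMap<>();
    //
    //   void increment(String frame) {
    //     dependCounts.merge(frame, 1, Integer::sum);
    //   }
    //
    //   /** Returns false when the count is already zero, mirroring decrementDependCount. */
    //   boolean decrement(String frame) {
    //     int count = dependCounts.getOrDefault(frame, 0);
    //     if (count <= 0) {
    //       return false;
    //     }
    //     dependCounts.put(frame, count - 1);
    //     return true;
    //   }
    //
    //   public static void main(String[] args) {
    //     DependCountSketch counts = new DependCountSketch();
    //     counts.increment("0001-render");                      // depend created
    //     System.out.println(counts.decrement("0001-render"));  // true, depend satisfied
    //     System.out.println(counts.decrement("0001-render"));  // false, already at zero
    //   }
    // }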
break; + + case LAYER_ON_SIM_FRAME: + createDepend(new LayerOnSimFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + frameDao.findFrame(onJob, depend.getDependOnFrameName()))); + break; + } + } + + private void updateDependCount(LayerInterface layer) { + FrameSearchInterface r = frameSearchFactory.create(layer); + for (FrameInterface f : frameDao.findFrames(r)) { + updateDependCounts(f); + } + } + + private void updateDependCount(JobInterface job) { + FrameSearchInterface r = frameSearchFactory.create(job); + for (FrameInterface f : frameDao.findFrames(r)) { + updateDependCounts(f); + } + } + + private void updateDependCounts(FrameInterface f) { + dependDao.incrementDependCount(f); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LightweightDependency getDepend(String id) { + return dependDao.getDepend(id); + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void unsatisfyDepend(LightweightDependency depend) { + + // Currently only handles FrameOnFrame and LayerOnLayer. + if (dependDao.setActive(depend)) { + + switch (depend.type) { + + case FRAME_ON_FRAME: + FrameInterface frame = frameDao.getFrame(depend.dependErFrameId); + updateDependCounts(frame); + break; + + case LAYER_ON_LAYER: + updateDependCount(layerDao.getLayer(depend.dependErLayerId)); + break; + } + } + } + + @Transactional(propagation = Propagation.SUPPORTS) + public void satisfyDepend(LightweightDependency depend) { + /* + * Before setting the depend to in-active, obtain a list of frames and decrement the depend + * count on them. + */ + if (DependType.FRAME_BY_FRAME.equals(depend.type)) { + List children = dependDao.getChildDepends(depend); + + for (LightweightDependency lwd : children) { + satisfyDepend(lwd); + } + return; + } + + /* + * Only decrement the depend counts if the depend is actually set to inactive. 
+ */ + if (dependDao.setInactive(depend)) { + logger.info("satisfied depend: " + depend.getId()); + for (FrameInterface f : frameDao.getDependentFrames(depend)) { + if (!dependDao.decrementDependCount(f)) { + logger.warn("warning, depend count for " + depend.getId() + "was not decremented " + + "for frame " + f + "because the count is " + "already 0."); } - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatThisDependsOn(JobInterface job, DependTarget target) { - return dependDao.getWhatThisDependsOn(job, target); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatThisDependsOn(LayerInterface layer, DependTarget target) { - return dependDao.getWhatThisDependsOn(layer, target); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatThisDependsOn(FrameInterface frame, DependTarget target) { - return dependDao.getWhatThisDependsOn(frame, target); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public LightweightDependency getCurrentDepend(String id) { - return dependDao.getDepend(id); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatDependsOn(JobInterface job) { - return dependDao.getWhatDependsOn(job); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatDependsOn(JobInterface job, DependTarget target) { - return dependDao.getWhatDependsOn(job, target); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatDependsOn(FrameInterface frame) { - return dependDao.getWhatDependsOn(frame); - } - - @Override - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatDependsOn(FrameInterface frame, boolean active) { - return dependDao.getWhatDependsOn(frame, active); - } - - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatDependsOn(LayerInterface layer) { - return dependDao.getWhatDependsOn(layer); - } - - @Override - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public List getWhatDependsOn(LayerInterface layer, boolean active) { - return dependDao.getWhatDependsOn(layer, active); - } - - @Transactional(propagation=Propagation.REQUIRED) - public void deleteDepend(LightweightDependency depend) { - dependDao.deleteDepend(depend); - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public DependDao getDependDao() { - return dependDao; - } - - public void setDependDao(DependDao workDao) { - this.dependDao = workDao; - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } + } + } + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatThisDependsOn(JobInterface job, DependTarget target) { + return dependDao.getWhatThisDependsOn(job, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatThisDependsOn(LayerInterface layer, + DependTarget target) { + 
return dependDao.getWhatThisDependsOn(layer, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatThisDependsOn(FrameInterface frame, + DependTarget target) { + return dependDao.getWhatThisDependsOn(frame, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LightweightDependency getCurrentDepend(String id) { + return dependDao.getDepend(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(JobInterface job) { + return dependDao.getWhatDependsOn(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(JobInterface job, DependTarget target) { + return dependDao.getWhatDependsOn(job, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(FrameInterface frame) { + return dependDao.getWhatDependsOn(frame); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(FrameInterface frame, boolean active) { + return dependDao.getWhatDependsOn(frame, active); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(LayerInterface layer) { + return dependDao.getWhatDependsOn(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(LayerInterface layer, boolean active) { + return dependDao.getWhatDependsOn(layer, active); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void deleteDepend(LightweightDependency depend) { + dependDao.deleteDepend(depend); + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public DependDao getDependDao() { + return dependDao; + } + + public void setDependDao(DependDao workDao) { + this.dependDao = workDao; + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java index 23e51037d..67a11d712 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.service; @@ -54,281 +52,277 @@ public class EmailSupport { - private MailSender mailSender; - private JobManager jobManager; - private String emailDomain; - private String emailFromAddress; - private String[] emailCcAddresses; + private MailSender mailSender; + private JobManager jobManager; + private String emailDomain; + private String emailFromAddress; + private String[] emailCcAddresses; - private Map imageMap; + private Map imageMap; - private static final Logger logger = LogManager.getLogger(EmailSupport.class); + private static final Logger logger = LogManager.getLogger(EmailSupport.class); - @Autowired - public EmailSupport(Environment env) { - this.emailDomain = env.getProperty("email.domain", "opencue.io"); - this.emailFromAddress = env.getProperty("email.from.address", "opencue-noreply@opencue.io"); - this.emailCcAddresses = env.getProperty("email.cc.addresses", "").split(","); - } - - private static void loadImage(Map map, String path) { - InputStream is = null; - ByteArrayOutputStream os = null; - try { - // Try loading as classpath resource - is = EmailSupport.class.getResourceAsStream("/public/" + path); - - // Try loading as file (sbt-pack layout) - if (is == null) { - try { - is = new FileInputStream("public/" + path); - } catch (FileNotFoundException fnfe) { - // do nothing - } - } - - // Try loading as file (unit tests don't have image paths loaded into classpath) - if (is == null) { - try { - is = new FileInputStream("conf/webapp/html/" + path); - } catch (FileNotFoundException fnfe) { - // do nothing - } - } + @Autowired + public EmailSupport(Environment env) { + this.emailDomain = env.getProperty("email.domain", "opencue.io"); + this.emailFromAddress = env.getProperty("email.from.address", "opencue-noreply@opencue.io"); + this.emailCcAddresses = env.getProperty("email.cc.addresses", "").split(","); + } - // If neither loaded, throw an exception - if (is == null) { - throw new IOException("Unable to load"); - } + private static void loadImage(Map map, String path) { + InputStream is = null; + ByteArrayOutputStream os = null; + try { + // Try loading as classpath resource + is = EmailSupport.class.getResourceAsStream("/public/" + path); - // Read contents to byte array - os = new ByteArrayOutputStream(); - byte[] buffer = new byte[1024]; - int len; - while ((len = is.read(buffer)) != -1) { - os.write(buffer, 0, len); - } - byte[] data = os.toByteArray(); + // Try loading as file (sbt-pack layout) + if (is == null) { + try { + is = new FileInputStream("public/" + path); + } catch (FileNotFoundException fnfe) { + // do nothing + } + } - // Put in map - map.put(path, data); + // Try loading as file (unit tests don't have image paths loaded into classpath) + if (is == 
null) { + try { + is = new FileInputStream("conf/webapp/html/" + path); + } catch (FileNotFoundException fnfe) { + // do nothing + } + } + + // If neither loaded, throw an exception + if (is == null) { + throw new IOException("Unable to load"); + } + + // Read contents to byte array + os = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int len; + while ((len = is.read(buffer)) != -1) { + os.write(buffer, 0, len); + } + byte[] data = os.toByteArray(); + + // Put in map + map.put(path, data); + } catch (IOException ioe) { + logger.error("Unable to read " + path, ioe); + } finally { + + // Close streams + if (os != null) { + try { + os.close(); } catch (IOException ioe) { - logger.error("Unable to read " + path, ioe); - } finally { - - // Close streams - if (os != null) { - try { - os.close(); - } catch (IOException ioe) { - logger.error("Unable to close buffer for " + path, ioe); - } - } - if (is != null) { - try { - is.close(); - } catch (IOException ioe) { - logger.error("Unable to load " + path, ioe); - } - } + logger.error("Unable to close buffer for " + path, ioe); } - } - - public void reportLaunchError(JobSpec spec, Throwable t) { - - SimpleMailMessage msg = new SimpleMailMessage(); - msg.setTo(String.format("%s@%s", spec.getUser(), this.emailDomain)); - msg.setFrom(this.emailFromAddress); - msg.setCc(this.emailCcAddresses); - msg.setSubject("Failed to launch OpenCue job."); - - StringBuilder sb = new StringBuilder(131072); - sb.append("This is an automatic message from cuebot that is sent"); - sb.append(" after a queued\njob has failed to launch. This usually"); - sb.append(" occurs if you have made a mistake\nediting an outline"); - sb.append(" script. If you have no idea why you are receiving\nthis"); - sb.append(" message and your jobs are not hitting the cue, please"); - sb.append(" open a\nhelpdesk ticket with the debugging information"); - sb.append(" provided below.\n\n"); - - sb.append("Failed to launch jobs:\n"); - for (BuildableJob job : spec.getJobs()) { - sb.append(job.detail.name); - sb.append("\n"); + } + if (is != null) { + try { + is.close(); + } catch (IOException ioe) { + logger.error("Unable to load " + path, ioe); } - sb.append("\n\n"); - sb.append(new XMLOutputter( - Format.getPrettyFormat()).outputString(spec.getDoc())); - sb.append("\n\n"); - sb.append(CueExceptionUtil.getStackTrace(t)); - - String body = sb.toString(); - msg.setText(body); - sendMessage(msg); + } } + } + + public void reportLaunchError(JobSpec spec, Throwable t) { + + SimpleMailMessage msg = new SimpleMailMessage(); + msg.setTo(String.format("%s@%s", spec.getUser(), this.emailDomain)); + msg.setFrom(this.emailFromAddress); + msg.setCc(this.emailCcAddresses); + msg.setSubject("Failed to launch OpenCue job."); + + StringBuilder sb = new StringBuilder(131072); + sb.append("This is an automatic message from cuebot that is sent"); + sb.append(" after a queued\njob has failed to launch. This usually"); + sb.append(" occurs if you have made a mistake\nediting an outline"); + sb.append(" script. 
If you have no idea why you are receiving\nthis"); + sb.append(" message and your jobs are not hitting the cue, please"); + sb.append(" open a\nhelpdesk ticket with the debugging information"); + sb.append(" provided below.\n\n"); + + sb.append("Failed to launch jobs:\n"); + for (BuildableJob job : spec.getJobs()) { + sb.append(job.detail.name); + sb.append("\n"); + } + sb.append("\n\n"); + sb.append(new XMLOutputter(Format.getPrettyFormat()).outputString(spec.getDoc())); + sb.append("\n\n"); + sb.append(CueExceptionUtil.getStackTrace(t)); + + String body = sb.toString(); + msg.setText(body); + sendMessage(msg); + } + + public void reportJobComment(JobInterface job, CommentDetail c, String[] emails) { + + SimpleMailMessage msg = new SimpleMailMessage(); + msg.setTo(emails); + msg.setFrom(this.emailFromAddress); + msg.setSubject("New comment on " + job.getName()); + + StringBuilder sb = new StringBuilder(8096); + sb.append("Job: " + job.getName() + "\n"); + sb.append("User: " + c.user + "\n"); + sb.append("Subject: " + c.subject + "\n"); + sb.append("-----------------------------------------\n"); + sb.append(c.message); + + msg.setText(sb.toString()); + sendMessage(msg); + } + + public void sendMessage(SimpleMailMessage message) { + try { + mailSender.send(message); + } catch (MailException ex) { + logger.warn("Failed to send launch failure email, " + ex.getMessage()); + } + } - public void reportJobComment(JobInterface job, CommentDetail c, String[] emails) { - - SimpleMailMessage msg = new SimpleMailMessage(); - msg.setTo(emails); - msg.setFrom(this.emailFromAddress); - msg.setSubject("New comment on " + job.getName()); - - StringBuilder sb = new StringBuilder(8096); - sb.append("Job: " + job.getName() + "\n"); - sb.append("User: " + c.user + "\n"); - sb.append("Subject: " + c.subject + "\n"); - sb.append("-----------------------------------------\n"); - sb.append(c.message); + public void sendShutdownEmail(JobInterface job) { - msg.setText(sb.toString()); - sendMessage(msg); + JobDetail d = jobManager.getJobDetail(job.getJobId()); + if (d.email == null) { + return; } - public void sendMessage(SimpleMailMessage message) { - try { - mailSender.send(message); - } catch (MailException ex) { - logger.warn("Failed to send launch failure email, " + ex.getMessage()); + try { + + VelocityEngine ve = new VelocityEngine(); + ve.setProperty("resource.loader", "class"); + ve.setProperty("class.resource.loader.class", + "org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader"); + ve.init(); + + VelocityContext context = new VelocityContext(); + ExecutionSummary exj = jobManager.getExecutionSummary(job); + FrameStateTotals jts = jobManager.getFrameStateTotals(job); + + String status = ""; + if (jts.total != jts.succeeded) { + status = "Failed "; + } else { + status = "Succeeded "; + } + + context.put("jobName", d.name); + context.put("jobStatus", status.toUpperCase()); + context.put("deptName", d.deptName.toUpperCase()); + context.put("showName", d.showName.toUpperCase()); + context.put("totalLayers", d.totalLayers); + context.put("shotName", d.shot.toUpperCase()); + context.put("succeededFrames", jts.succeeded); + context.put("totalFrames", jts.total); + context.put("dependFrames", jts.depend); + context.put("deadFrames", jts.dead); + context.put("waitingFrames", jts.waiting); + context.put("eatenFrames", jts.eaten); + context.put("failedFrames", jts.dead + jts.eaten + jts.waiting); + context.put("checkpointFrames", jts.checkpoint); + context.put("maxRSS", + String.format(Locale.ROOT, 
"%.1fGB", exj.highMemoryKb / 1024.0 / 1024.0)); + context.put("coreTime", String.format(Locale.ROOT, "%.1f", exj.coreTime / 3600.0)); + + Template t = ve.getTemplate("/conf/webapp/html/email_template.html"); + + List layers = jobManager.getLayerDetails(job); + List layerStats = new ArrayList(layers.size()); + + boolean shouldCreateFile = false; + + Map map = new HashMap(); + loadImage(map, "opencue_logo.png"); + + for (LayerDetail layer : layers) { + if (layer.type.equals(LayerType.RENDER)) { + LayerStats stats = new LayerStats(); + stats.setDetail(layer); + stats.setExecutionSummary(jobManager.getExecutionSummary(layer)); + stats.setFrameStateTotals(jobManager.getFrameStateTotals(layer)); + stats.setThreadStats(jobManager.getThreadStats(layer)); + stats.setOutputs( + jobManager.getLayerOutputs(layer).stream().sorted().collect(Collectors.toList())); + layerStats.add(stats); + if (stats.getOutputs().size() > 3) + shouldCreateFile = true; + if (!layer.services.isEmpty()) + loadImage(map, "services/" + layer.services.toArray()[0] + ".png"); } - } + } - public void sendShutdownEmail(JobInterface job) { + imageMap = Collections.unmodifiableMap(map); - JobDetail d = jobManager.getJobDetail(job.getJobId()); - if (d.email == null) { - return; - } + context.put("layers", layerStats); - try { + StringWriter w = new StringWriter(); + t.merge(context, w); - VelocityEngine ve = new VelocityEngine(); - ve.setProperty("resource.loader", "class"); - ve.setProperty("class.resource.loader.class", - "org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader"); - ve.init(); - - VelocityContext context = new VelocityContext(); - ExecutionSummary exj = jobManager.getExecutionSummary(job); - FrameStateTotals jts = jobManager.getFrameStateTotals(job); - - String status = ""; - if (jts.total != jts.succeeded) { - status = "Failed "; - } else { - status = "Succeeded "; - } + String subject = "OpenCue Job " + d.getName(); - context.put("jobName", d.name); - context.put("jobStatus", status.toUpperCase()); - context.put("deptName", d.deptName.toUpperCase()); - context.put("showName", d.showName.toUpperCase()); - context.put("totalLayers", d.totalLayers); - context.put("shotName", d.shot.toUpperCase()); - context.put("succeededFrames", jts.succeeded); - context.put("totalFrames", jts.total); - context.put("dependFrames", jts.depend); - context.put("deadFrames", jts.dead); - context.put("waitingFrames", jts.waiting); - context.put("eatenFrames", jts.eaten); - context.put("failedFrames", jts.dead + jts.eaten + jts.waiting); - context.put("checkpointFrames", jts.checkpoint); - context.put("maxRSS", String.format(Locale.ROOT, "%.1fGB", - exj.highMemoryKb / 1024.0 / 1024.0)); - context.put("coreTime", String.format(Locale.ROOT, "%.1f", - exj.coreTime / 3600.0)); - - Template t = ve.getTemplate("/conf/webapp/html/email_template.html"); - - List layers = jobManager.getLayerDetails(job); - List layerStats = new ArrayList(layers.size()); - - boolean shouldCreateFile = false; - - Map map = new HashMap(); - loadImage(map, "opencue_logo.png"); - - for (LayerDetail layer : layers) { - if (layer.type.equals(LayerType.RENDER)) { - LayerStats stats = new LayerStats(); - stats.setDetail(layer); - stats.setExecutionSummary(jobManager.getExecutionSummary(layer)); - stats.setFrameStateTotals(jobManager.getFrameStateTotals(layer)); - stats.setThreadStats(jobManager.getThreadStats(layer)); - stats.setOutputs(jobManager.getLayerOutputs(layer).stream().sorted().collect(Collectors.toList())); - layerStats.add(stats); - if 
(stats.getOutputs().size() > 3) - shouldCreateFile = true; - if (!layer.services.isEmpty()) - loadImage(map, "services/" + layer.services.toArray()[0] + ".png"); - } - } + subject = status + subject; - imageMap = Collections.unmodifiableMap(map); - - context.put("layers", layerStats); - - StringWriter w = new StringWriter(); - t.merge(context, w); - - String subject = "OpenCue Job " + d.getName(); - - subject = status + subject; - - BufferedWriter output = null; - File file = null; - if (shouldCreateFile) { - try { - file = new File("my_outputs.txt"); - output = new BufferedWriter(new FileWriter(file)); - for (LayerDetail layer : layers) { - if (layer.type.equals(LayerType.RENDER)) { - List sortedNames = jobManager - .getLayerOutputs(layer) - .stream() - .sorted() - .collect(Collectors.toList()); - output.write(layer.name + "\n" + String.join("\n", sortedNames) + "\n"); - } - } - } catch (IOException e) { - e.printStackTrace(); - } finally { - if (output != null) { - try { - output.close(); - } catch (IOException e) { - e.printStackTrace(); - } - } - } + BufferedWriter output = null; + File file = null; + if (shouldCreateFile) { + try { + file = new File("my_outputs.txt"); + output = new BufferedWriter(new FileWriter(file)); + for (LayerDetail layer : layers) { + if (layer.type.equals(LayerType.RENDER)) { + List sortedNames = + jobManager.getLayerOutputs(layer).stream().sorted().collect(Collectors.toList()); + output.write(layer.name + "\n" + String.join("\n", sortedNames) + "\n"); } - - for (String email : d.email.split(",")) { - try { - CueUtil.sendmail(email, this.emailFromAddress, subject, new StringBuilder(w.toString()), imageMap, - file); - } catch (Exception e) { - // just log and eat if the mail server is down or something - // of that nature. - logger.info("Failed to send job complete mail, reason: " + e); - } + } + } catch (IOException e) { + e.printStackTrace(); + } finally { + if (output != null) { + try { + output.close(); + } catch (IOException e) { + e.printStackTrace(); } + } + } + } + + for (String email : d.email.split(",")) { + try { + CueUtil.sendmail(email, this.emailFromAddress, subject, new StringBuilder(w.toString()), + imageMap, file); } catch (Exception e) { - e.printStackTrace(); - throw new SpcueRuntimeException("Failed " + e, e); + // just log and eat if the mail server is down or something + // of that nature. 
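      // A compact sketch of the lookup fallback used by loadImage earlier in this class: try
      // the classpath first, then the packaged layout, then the source tree, and return the
      // bytes from whichever location answers. The helper name is illustrative only; the real
      // method also stores the result in the image map and logs failures instead of throwing.
      //
      // import java.io.ByteArrayOutputStream;
      // import java.io.FileInputStream;
      // import java.io.IOException;
      // import java.io.InputStream;
      //
      // class ImageLookupSketch {
      //   static byte[] readImage(String path) throws IOException {
      //     String[] fileFallbacks = {"public/" + path, "conf/webapp/html/" + path};
      //     InputStream is = ImageLookupSketch.class.getResourceAsStream("/public/" + path);
      //     for (int i = 0; is == null && i < fileFallbacks.length; i++) {
      //       try {
      //         is = new FileInputStream(fileFallbacks[i]);
      //       } catch (IOException ignored) {
      //         // fall through to the next location
      //       }
      //     }
      //     if (is == null) {
      //       throw new IOException("Unable to load " + path);
      //     }
      //     try (InputStream in = is; ByteArrayOutputStream os = new ByteArrayOutputStream()) {
      //       byte[] buffer = new byte[1024];
      //       int len;
      //       while ((len = in.read(buffer)) != -1) {
      //         os.write(buffer, 0, len);
      //       }
      //       return os.toByteArray();
      //     }
      //   }
      // }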
+ logger.info("Failed to send job complete mail, reason: " + e); } + } + } catch (Exception e) { + e.printStackTrace(); + throw new SpcueRuntimeException("Failed " + e, e); } + } - public JobManager getJobManager() { - return jobManager; - } + public JobManager getJobManager() { + return jobManager; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } - public void setMailSender(MailSender mailSender) { - this.mailSender = mailSender; - } + public void setMailSender(MailSender mailSender) { + this.mailSender = mailSender; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java index 4ec4cc3e5..e8bcdc5de 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.service; import java.util.List; @@ -32,40 +28,57 @@ public interface FilterManager { - void runFiltersOnJob(JobDetail job); - void runFilterOnJob(FilterEntity filter, JobDetail job); - void runFilterOnJob(FilterEntity filter, String id); - void runFilterOnGroup(FilterEntity filter, GroupInterface group); + void runFiltersOnJob(JobDetail job); - void lowerFilterOrder(FilterInterface f); - void raiseFilterOrder(FilterInterface f); - void setFilterOrder(FilterInterface f, double order); + void runFilterOnJob(FilterEntity filter, JobDetail job); - void createFilter(FilterEntity filter); - void createAction(ActionEntity action); - void createMatcher(MatcherEntity action); + void runFilterOnJob(FilterEntity filter, String id); - void deleteFilter(FilterInterface f); - void deleteAction(ActionInterface action); - void deleteMatcher(MatcherInterface matcher); + void runFilterOnGroup(FilterEntity filter, GroupInterface group); - void updateMatcher(MatcherEntity matcher); - void updateAction(ActionEntity action); + void lowerFilterOrder(FilterInterface f); - FilterEntity getFilter(String id); - MatcherEntity getMatcher(String id); - ActionEntity getAction(String id); + void raiseFilterOrder(FilterInterface f); - FilterEntity getFilter(FilterInterface filter); - MatcherEntity getMatcher(MatcherInterface matcher); - ActionEntity getAction(ActionInterface action); + void setFilterOrder(FilterInterface f, double order); - boolean applyAction(ActionEntity action, JobDetail job); - boolean applyAction(ActionEntity action, JobDetail job, FilterManagerService.Context context); - boolean applyActions(List actions, JobDetail job, FilterManagerService.Context context); - boolean applyActions(List actions, JobDetail job); + void createFilter(FilterEntity filter); - public boolean isMatch(MatcherEntity matcher, JobDetail job); + void createAction(ActionEntity action); -} + void createMatcher(MatcherEntity action); + + void deleteFilter(FilterInterface f); + + void deleteAction(ActionInterface action); + + void deleteMatcher(MatcherInterface matcher); + + void updateMatcher(MatcherEntity matcher); + + void updateAction(ActionEntity action); + + FilterEntity getFilter(String id); + + MatcherEntity getMatcher(String id); + ActionEntity getAction(String id); + + FilterEntity getFilter(FilterInterface filter); + + MatcherEntity getMatcher(MatcherInterface matcher); + + ActionEntity getAction(ActionInterface action); + + boolean applyAction(ActionEntity action, JobDetail job); + + boolean applyAction(ActionEntity action, JobDetail job, FilterManagerService.Context context); + + boolean applyActions(List actions, JobDetail job, + FilterManagerService.Context context); + + boolean applyActions(List actions, JobDetail job); + + public boolean isMatch(MatcherEntity matcher, JobDetail job); + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java index eb49a9c51..eec1b74ac 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.ArrayList; @@ -55,496 +51,503 @@ // TODO: add filter caching /** - * The filter manager handles all spank filtering and manipulation - * of filters, actions, and matchers. + * The filter manager handles all spank filtering and manipulation of filters, actions, and + * matchers. * * @category Service */ @Transactional public class FilterManagerService implements FilterManager { - private static final Logger logger = LogManager.getLogger(FilterManagerService.class); - - private ActionDao actionDao; - private MatcherDao matcherDao; - private FilterDao filterDao; - private GroupDao groupDao; - private JobDao jobDao; - private LayerDao layerDao; - - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilter(FilterEntity filter) { - List jobs = jobDao.findJobs(filter); - for (JobDetail job: jobs) { - if(!match(filter, job)) { - continue; - } - applyActions(filter,job); - } - } + private static final Logger logger = LogManager.getLogger(FilterManagerService.class); - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilterOnJob(FilterEntity filter, JobDetail job) { - if (match(filter,job)) { - applyActions(filter,job); - } - } + private ActionDao actionDao; + private MatcherDao matcherDao; + private FilterDao filterDao; + private GroupDao groupDao; + private JobDao jobDao; + private LayerDao layerDao; - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilterOnJob(FilterEntity filter, String id) { - JobDetail j = jobDao.getJobDetail(id); - if (match(filter, j)) { - applyActions(filter,j); - } + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilter(FilterEntity filter) { + List jobs = jobDao.findJobs(filter); + for (JobDetail job : jobs) { + if (!match(filter, job)) { + continue; + } + applyActions(filter, job); } + } - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilterOnGroup(FilterEntity filter, GroupInterface group) { - for (JobDetail job: jobDao.findJobs(group)) { - if (match(filter,job)) { - applyActions(filter,job); - } - } + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilterOnJob(FilterEntity filter, JobDetail job) { + if (match(filter, job)) { + applyActions(filter, job); } + } - @Transactional(propagation = Propagation.SUPPORTS) - public void filterShow(ShowInterface show) { - - List filters = filterDao.getActiveFilters(show); - List jobs = jobDao.findJobs(show); - - for (JobDetail job: jobs) { - for (FilterEntity filter: filters) { - if (!match(filter,job)) { 
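    // The filter matching in this class boils down to MATCH_ANY versus MATCH_ALL over the
    // filter's matchers, with an empty matcher list never matching. A minimal sketch of that
    // rule, where a Predicate stands in for isMatch(matcher, job); the predicates and job
    // names below are illustrative assumptions, not part of the service.
    //
    // import java.util.List;
    // import java.util.function.Predicate;
    //
    // class MatchSketch {
    //   enum FilterType { MATCH_ANY, MATCH_ALL }
    //
    //   static <T> boolean matches(FilterType type, List<Predicate<T>> matchers, T job) {
    //     if (matchers.isEmpty()) {
    //       return false; // a filter with no matchers never applies
    //     }
    //     return type == FilterType.MATCH_ALL
    //         ? matchers.stream().allMatch(m -> m.test(job))
    //         : matchers.stream().anyMatch(m -> m.test(job));
    //   }
    //
    //   public static void main(String[] args) {
    //     List<Predicate<String>> matchers =
    //         List.of(name -> name.startsWith("show-"), name -> name.contains("comp"));
    //     System.out.println(matches(FilterType.MATCH_ALL, matchers, "show-shot-comp"));  // true
    //     System.out.println(matches(FilterType.MATCH_ANY, matchers, "show-shot-light")); // true
    //     System.out.println(matches(FilterType.MATCH_ALL, matchers, "show-shot-light")); // false
    //   }
    // }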
- continue; - } - boolean stopProcessing = applyActions(filter,job); - if (stopProcessing) { - break; - } - } - } - } - public void deleteFilter(FilterInterface f) { - filterDao.deleteFilter(f); + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilterOnJob(FilterEntity filter, String id) { + JobDetail j = jobDao.getJobDetail(id); + if (match(filter, j)) { + applyActions(filter, j); } + } - public void lowerFilterOrder(FilterInterface f) { - filterDao.lowerFilterOrder(f, 1); + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilterOnGroup(FilterEntity filter, GroupInterface group) { + for (JobDetail job : jobDao.findJobs(group)) { + if (match(filter, job)) { + applyActions(filter, job); + } } + } - public void raiseFilterOrder(FilterInterface f) { - filterDao.raiseFilterOrder(f, 1); - } + @Transactional(propagation = Propagation.SUPPORTS) + public void filterShow(ShowInterface show) { - public void setFilterOrder(FilterInterface f, double order) { - filterDao.updateSetFilterOrder(f, order); - } + List filters = filterDao.getActiveFilters(show); + List jobs = jobDao.findJobs(show); - public void createAction(ActionEntity action) { - actionDao.createAction(action); + for (JobDetail job : jobs) { + for (FilterEntity filter : filters) { + if (!match(filter, job)) { + continue; + } + boolean stopProcessing = applyActions(filter, job); + if (stopProcessing) { + break; + } + } + } + } + + public void deleteFilter(FilterInterface f) { + filterDao.deleteFilter(f); + } + + public void lowerFilterOrder(FilterInterface f) { + filterDao.lowerFilterOrder(f, 1); + } + + public void raiseFilterOrder(FilterInterface f) { + filterDao.raiseFilterOrder(f, 1); + } + + public void setFilterOrder(FilterInterface f, double order) { + filterDao.updateSetFilterOrder(f, order); + } + + public void createAction(ActionEntity action) { + actionDao.createAction(action); + } + + public void createMatcher(MatcherEntity matcher) { + matcherDao.insertMatcher(matcher); + } + + /** + * Stores what options have already been set by other filers. Will need to extend this later to + * handle jobs running through different filers. + */ + public class Context { + + public static final int SET_MIN_CORES = 1; + public static final int SET_MAX_CORES = 2; + public static final int SET_PRIORITY = 4; + + int props = 0; + + public void setProperty(int value) { + if ((props & value) != value) { + props = props + value; + } + } + + public boolean isSet(int value) { + return (props & value) == value; + } + } + + /** + * Take a new job detail and run it though the show's filters, setting the groupId property. + * + * @param job + */ + @Transactional(propagation = Propagation.SUPPORTS) + public void runFiltersOnJob(JobDetail job) { + Context context = new Context(); + List filters = filterDao.getActiveFilters(job); + for (FilterEntity filter : filters) { + if (match(filter, job)) { + boolean stop_filters = applyActions(filter, job, context); + if (stop_filters) { + break; + } + } } + } - public void createMatcher(MatcherEntity matcher) { - matcherDao.insertMatcher(matcher); + public boolean applyActions(List actions, JobDetail job, Context context) { + for (ActionEntity action : actions) { + applyAction(action, job, context); + if (action.type.equals(ActionType.STOP_PROCESSING)) { + return true; + } } + return false; + } - /** - * Stores what options have already been set by other - * filers. Will need to extend this later to handle - * jobs running through different filers. 
- */ - public class Context { + public boolean applyActions(List actions, JobDetail job) { + return applyActions(actions, job, new Context()); + } - public static final int SET_MIN_CORES = 1; - public static final int SET_MAX_CORES = 2; - public static final int SET_PRIORITY = 4; + public boolean applyActions(FilterEntity filter, JobDetail job) { + return applyActions(filter, job, new Context()); + } - int props = 0; + public boolean applyActions(FilterEntity filter, JobDetail job, Context context) { + return applyActions(actionDao.getActions(filter), job, context); + } - public void setProperty(int value) { - if ((props & value) != value) { - props = props + value; - } - } + private boolean isMatch(final MatcherEntity matcher, final String... inputs) { + boolean isMatch = false; - public boolean isSet(int value) { - return (props & value) == value; + switch (matcher.type) { + case CONTAINS: + for (String s : inputs) { + isMatch = s.contains(matcher.value); + if (isMatch) + break; } - } - - /** - * Take a new job detail and run it though the - * show's filters, setting the groupId property. - - * @param job - */ - @Transactional(propagation = Propagation.SUPPORTS) - public void runFiltersOnJob(JobDetail job) { - Context context = new Context(); - List filters = filterDao.getActiveFilters(job); - for (FilterEntity filter : filters) { - if(match(filter, job)) { - boolean stop_filters = applyActions(filter, job, context); - if (stop_filters) { break; } - } + break; + case DOES_NOT_CONTAIN: + for (String s : inputs) { + isMatch = s.contains(matcher.value); + if (isMatch) + return false; } - } - - public boolean applyActions(List actions, JobDetail job, Context context) { - for (ActionEntity action: actions) { - applyAction(action, job, context); - if (action.type.equals(ActionType.STOP_PROCESSING)) { - return true; - } + isMatch = true; + break; + case IS: + for (String s : inputs) { + isMatch = s.equals(matcher.value); + if (isMatch) + break; + } + break; + case IS_NOT: + for (String s : inputs) { + isMatch = s.equals(matcher.value); + if (isMatch) + return false; + } + isMatch = true; + break; + case BEGINS_WITH: + for (String s : inputs) { + isMatch = s.startsWith(matcher.value); + if (isMatch) + break; + } + break; + case ENDS_WITH: + for (String s : inputs) { + isMatch = s.endsWith(matcher.value); + if (isMatch) + break; + } + break; + case REGEX: + Pattern pattern = null; + try { + pattern = Pattern.compile(matcher.value); + } catch (Exception e) { + return false; } - return false; - } - public boolean applyActions(List actions, JobDetail job) { - return applyActions(actions, job, new Context()); + for (String s : inputs) { + isMatch = pattern.matcher(s).find(); + if (isMatch) + break; + } + break; } + return isMatch; + } - public boolean applyActions(FilterEntity filter, JobDetail job) { - return applyActions(filter, job, new Context()); - } + public boolean isMatch(MatcherEntity matcher, JobDetail job) { - public boolean applyActions(FilterEntity filter, JobDetail job, Context context) { - return applyActions(actionDao.getActions(filter), job, context); - } + String input = null; - private boolean isMatch(final MatcherEntity matcher, final String ... 
inputs) { - boolean isMatch = false; - - switch (matcher.type) { - case CONTAINS: - for (String s : inputs) { - isMatch = s.contains(matcher.value); - if (isMatch) break; - } - break; - case DOES_NOT_CONTAIN: - for (String s : inputs) { - isMatch = s.contains(matcher.value); - if (isMatch) return false; - } - isMatch = true; - break; - case IS: - for (String s : inputs) { - isMatch = s.equals(matcher.value); - if (isMatch) break; - } - break; - case IS_NOT: - for (String s : inputs) { - isMatch = s.equals(matcher.value); - if (isMatch) return false; - } - isMatch = true; - break; - case BEGINS_WITH: - for (String s : inputs) { - isMatch = s.startsWith(matcher.value); - if (isMatch) break; - } - break; - case ENDS_WITH: - for (String s : inputs) { - isMatch = s.endsWith(matcher.value); - if (isMatch) break; - } - break; - case REGEX: - Pattern pattern = null; - try { - pattern = Pattern.compile(matcher.value); - } catch (Exception e) { - return false; - } - - for (String s : inputs) { - isMatch = pattern.matcher(s).find(); - if (isMatch) break; - } - break; + switch (matcher.subject) { + case SERVICE_NAME: { + List layers = layerDao.getLayerDetails(job); + List serviceNames = new ArrayList(layers.size()); + for (LayerDetail layer : layers) { + for (String service : layer.services) { + serviceNames.add(service); + } } - return isMatch; - } - public boolean isMatch(MatcherEntity matcher, JobDetail job) { - - String input = null; + return isMatch(matcher, serviceNames.toArray(new String[0])); + } + case LAYER_NAME: { + List layers = layerDao.getLayerDetails(job); + List layerNames = new ArrayList(layers.size()); + for (LayerDetail layer : layers) { + layerNames.add(layer.name); + } + return isMatch(matcher, layerNames.toArray(new String[0])); + } + default: { switch (matcher.subject) { - case SERVICE_NAME: { - List layers = layerDao.getLayerDetails(job); - List serviceNames = new ArrayList(layers.size()); - for (LayerDetail layer : layers) { - for (String service : layer.services) { - serviceNames.add(service); - } - } - - return isMatch(matcher, serviceNames.toArray(new String[0])); - } - case LAYER_NAME: { - List layers = layerDao.getLayerDetails(job); - List layerNames = new ArrayList(layers.size()); - for (LayerDetail layer : layers) { - layerNames.add(layer.name); - } - - return isMatch(matcher, layerNames.toArray(new String[0])); - } - default: { - switch (matcher.subject) { - case JOB_NAME: - input = job.getName().toLowerCase(); - break; - case SHOW: - input = job.showName.toLowerCase(); - break; - case SHOT: - input = job.shot.toLowerCase(); - break; - case USER: - input = job.user.toLowerCase(); - break; - case PRIORITY: - input = Integer.toString(job.priority); - break; - case FACILITY: - if (job.facilityName == null) { - return false; - } - input = job.facilityName.toLowerCase(); - break; - default: - input = ""; - } - - return isMatch(matcher, input); + case JOB_NAME: + input = job.getName().toLowerCase(); + break; + case SHOW: + input = job.showName.toLowerCase(); + break; + case SHOT: + input = job.shot.toLowerCase(); + break; + case USER: + input = job.user.toLowerCase(); + break; + case PRIORITY: + input = Integer.toString(job.priority); + break; + case FACILITY: + if (job.facilityName == null) { + return false; } + input = job.facilityName.toLowerCase(); + break; + default: + input = ""; } - } - public boolean applyAction(ActionEntity action, JobDetail job) { - return applyAction(action, job, new Context()); + return isMatch(matcher, input); + } } + } - public boolean 
applyAction(ActionEntity action, JobDetail job, Context context) { - - boolean stopProcessing = false; - /** - * All of these actions can be handled by the call - * to updateJob which happens later on. All other - * actions are handlded in applyAction - */ - switch(action.type) { - case PAUSE_JOB: - jobDao.updatePaused(job, action.booleanValue); - break; - - case SET_JOB_MIN_CORES: - context.setProperty(Context.SET_MIN_CORES); - jobDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue)); - break; - - case SET_JOB_MAX_CORES: - context.setProperty(Context.SET_MAX_CORES); - jobDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue)); - break; - - case SET_JOB_PRIORITY: - context.setProperty(Context.SET_PRIORITY); - int priority = (int) action.intValue; - jobDao.updatePriority(job, priority); - job.priority = priority; - break; - - case MOVE_JOB_TO_GROUP: - // Just ignore this if the groupValue is null. The job will launch - // and it can be moved to the right group manually. - if (action.groupValue == null) { - logger.error("Did not move job to group, the group value was not valid."); - break; - } - - GroupDetail g = groupDao.getGroupDetail(action.groupValue); - List inherits = new ArrayList(3); - - // Do not set these values from the group if they were set by another filter. - if (!context.isSet(Context.SET_PRIORITY) && g.jobPriority != -1) { - inherits.add(Inherit.Priority); - } - if (!context.isSet(Context.SET_MAX_CORES) && g.jobMaxCores != -1) { - inherits.add(Inherit.MaxCores); - } - if (!context.isSet(Context.SET_MIN_CORES) && g.jobMinCores != -1) { - inherits.add(Inherit.MinCores); - } - - logger.info("moving job into group: " + g.name); - jobDao.updateParent(job, g, inherits.toArray(new Inherit[0])); - break; - - case SET_ALL_RENDER_LAYER_TAGS: - layerDao.updateTags(job, action.stringValue, LayerType.RENDER); - break; - - case SET_ALL_RENDER_LAYER_MEMORY: - layerDao.updateMinMemory(job, (int) action.intValue, LayerType.RENDER); - break; - - case SET_ALL_RENDER_LAYER_MIN_CORES: - layerDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); - break; - - case SET_ALL_RENDER_LAYER_MAX_CORES: - layerDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); - break; - - case SET_MEMORY_OPTIMIZER: - List layers = layerDao.getLayers(job); - for (LayerInterface layer : layers) { - layerDao.enableMemoryOptimizer(layer, action.booleanValue); - } - break; - - default: - stopProcessing = true; - break; + public boolean applyAction(ActionEntity action, JobDetail job) { + return applyAction(action, job, new Context()); + } + + public boolean applyAction(ActionEntity action, JobDetail job, Context context) { + + boolean stopProcessing = false; + /** + * All of these actions can be handled by the call to updateJob which happens later on. 
All + * other actions are handlded in applyAction + */ + switch (action.type) { + case PAUSE_JOB: + jobDao.updatePaused(job, action.booleanValue); + break; + + case SET_JOB_MIN_CORES: + context.setProperty(Context.SET_MIN_CORES); + jobDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue)); + break; + + case SET_JOB_MAX_CORES: + context.setProperty(Context.SET_MAX_CORES); + jobDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue)); + break; + + case SET_JOB_PRIORITY: + context.setProperty(Context.SET_PRIORITY); + int priority = (int) action.intValue; + jobDao.updatePriority(job, priority); + job.priority = priority; + break; + + case MOVE_JOB_TO_GROUP: + // Just ignore this if the groupValue is null. The job will launch + // and it can be moved to the right group manually. + if (action.groupValue == null) { + logger.error("Did not move job to group, the group value was not valid."); + break; } - return stopProcessing; - } + GroupDetail g = groupDao.getGroupDetail(action.groupValue); + List inherits = new ArrayList(3); - private boolean match(FilterEntity filter, JobDetail job) { + // Do not set these values from the group if they were set by another filter. + if (!context.isSet(Context.SET_PRIORITY) && g.jobPriority != -1) { + inherits.add(Inherit.Priority); + } + if (!context.isSet(Context.SET_MAX_CORES) && g.jobMaxCores != -1) { + inherits.add(Inherit.MaxCores); + } + if (!context.isSet(Context.SET_MIN_CORES) && g.jobMinCores != -1) { + inherits.add(Inherit.MinCores); + } - int numMatched = 0; - int numMatchesRequired = 1; + logger.info("moving job into group: " + g.name); + jobDao.updateParent(job, g, inherits.toArray(new Inherit[0])); + break; - List matchers = matcherDao.getMatchers(filter); - if (matchers.size() == 0) { return false; } + case SET_ALL_RENDER_LAYER_TAGS: + layerDao.updateTags(job, action.stringValue, LayerType.RENDER); + break; - if (filter.type.equals(FilterType.MATCH_ALL)) { - numMatchesRequired = matchers.size(); - } + case SET_ALL_RENDER_LAYER_MEMORY: + layerDao.updateMinMemory(job, (int) action.intValue, LayerType.RENDER); + break; - for (MatcherEntity matcher: matchers) { - boolean itMatches = isMatch(matcher,job); + case SET_ALL_RENDER_LAYER_MIN_CORES: + layerDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); + break; - if (!itMatches) { - if (filter.type.equals(FilterType.MATCH_ALL)) { - break; - } - } - else { - numMatched++; - if (filter.type.equals(FilterType.MATCH_ANY)) { - break; - } - } - } + case SET_ALL_RENDER_LAYER_MAX_CORES: + layerDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); + break; - if (numMatched == numMatchesRequired) { - return true; + case SET_MEMORY_OPTIMIZER: + List layers = layerDao.getLayers(job); + for (LayerInterface layer : layers) { + layerDao.enableMemoryOptimizer(layer, action.booleanValue); } + break; - return false; + default: + stopProcessing = true; + break; } - public FilterDao getFilterDao() { - return filterDao; - } + return stopProcessing; + } - public void setFilterDao(FilterDao filterDao) { - this.filterDao = filterDao; - } + private boolean match(FilterEntity filter, JobDetail job) { - public GroupDao getGroupDao() { - return groupDao; - } + int numMatched = 0; + int numMatchesRequired = 1; - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; + List matchers = matcherDao.getMatchers(filter); + if (matchers.size() == 0) { + return false; } - public void deleteAction(ActionInterface action) { - 
actionDao.deleteAction(action); + if (filter.type.equals(FilterType.MATCH_ALL)) { + numMatchesRequired = matchers.size(); } - public void deleteMatcher(MatcherInterface matcher) { - matcherDao.deleteMatcher(matcher); - } + for (MatcherEntity matcher : matchers) { + boolean itMatches = isMatch(matcher, job); - public ActionEntity getAction(String id) { - return actionDao.getAction(id); + if (!itMatches) { + if (filter.type.equals(FilterType.MATCH_ALL)) { + break; + } + } else { + numMatched++; + if (filter.type.equals(FilterType.MATCH_ANY)) { + break; + } + } } - public ActionEntity getAction(ActionInterface action) { - return actionDao.getAction(action); + if (numMatched == numMatchesRequired) { + return true; } - public FilterEntity getFilter(String id) { - return filterDao.getFilter(id); - } + return false; + } - public FilterEntity getFilter(FilterInterface filter) { - return filterDao.getFilter(filter); - } + public FilterDao getFilterDao() { + return filterDao; + } - public MatcherEntity getMatcher(String id) { - return matcherDao.getMatcher(id); - } + public void setFilterDao(FilterDao filterDao) { + this.filterDao = filterDao; + } - public MatcherEntity getMatcher(MatcherInterface matcher) { - return matcherDao.getMatcher(matcher); - } + public GroupDao getGroupDao() { + return groupDao; + } - public void updateAction(ActionEntity action) { - actionDao.updateAction(action); - } + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } - public void updateMatcher(MatcherEntity matcher) { - matcherDao.updateMatcher(matcher); - } + public void deleteAction(ActionInterface action) { + actionDao.deleteAction(action); + } - public void createFilter(FilterEntity filter) { - filterDao.insertFilter(filter); - } + public void deleteMatcher(MatcherInterface matcher) { + matcherDao.deleteMatcher(matcher); + } - public ActionDao getActionDao() { - return actionDao; - } + public ActionEntity getAction(String id) { + return actionDao.getAction(id); + } - public void setActionDao(ActionDao actionDao) { - this.actionDao = actionDao; - } + public ActionEntity getAction(ActionInterface action) { + return actionDao.getAction(action); + } - public JobDao getJobDao() { - return jobDao; - } + public FilterEntity getFilter(String id) { + return filterDao.getFilter(id); + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public FilterEntity getFilter(FilterInterface filter) { + return filterDao.getFilter(filter); + } - public MatcherDao getMatcherDao() { - return matcherDao; - } + public MatcherEntity getMatcher(String id) { + return matcherDao.getMatcher(id); + } - public void setMatcherDao(MatcherDao matcherDao) { - this.matcherDao = matcherDao; - } + public MatcherEntity getMatcher(MatcherInterface matcher) { + return matcherDao.getMatcher(matcher); + } - public LayerDao getLayerDao() { - return layerDao; - } + public void updateAction(ActionEntity action) { + actionDao.updateAction(action); + } - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } -} + public void updateMatcher(MatcherEntity matcher) { + matcherDao.updateMatcher(matcher); + } + + public void createFilter(FilterEntity filter) { + filterDao.insertFilter(filter); + } + + public ActionDao getActionDao() { + return actionDao; + } + + public void setActionDao(ActionDao actionDao) { + this.actionDao = actionDao; + } + + public JobDao getJobDao() { + return jobDao; + } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public MatcherDao getMatcherDao() { 
+ return matcherDao; + } + + public void setMatcherDao(MatcherDao matcherDao) { + this.matcherDao = matcherDao; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java index 2e3cf70be..15a9eaa60 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -30,86 +26,92 @@ public interface GroupManager { - void setGroupMaxCores(GroupInterface g, int coreUnits); - void setGroupMinCores(GroupInterface g, int coreUnits); - void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits); - void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits); - void setGroupMaxGpus(GroupInterface g, int gpuUnits); - void setGroupMinGpus(GroupInterface g, int gpuUnits); - void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits); - void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits); - void setGroupDefaultJobPriority(GroupInterface g, int priority); - - /** - * Return the group from its unique ID - * - * @param id - * @return - */ - GroupInterface getGroup(String id); - - /** - * Return the root group for the specified show. - * - * @param s - * @return - */ - GroupDetail getRootGroupDetail(ShowInterface s); - - /** - * Return the GroupDetail by job. - * - * @param j - * @return - */ - GroupDetail getGroupDetail(JobInterface j); - - /** - * Return a GroupDetail from its unique ID - * - * @param id - * @return - */ - GroupDetail getGroupDetail(String id); - - void setGroupParent(GroupInterface group, GroupInterface newParent); - - void deleteGroup(GroupInterface group); - - void createGroup(GroupDetail group, GroupInterface parent); - - /** - * Re-parent a job to the specified group. - * - * @param job - * @param group - * @param inherit - */ - void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit); - - /** - * Parents a list of groups to the specified group - * - * @param group - * @param groups - */ - void reparentGroups(GroupInterface group, List groups); - - /** - * Re-parent a list of unique group IDs. 
- * - * @param group - * @param groups - */ - void reparentGroupIds(GroupInterface group, List groups); - - /** - * Sets the group's department all all jobs in that - * group to the new department. - * - * @param group - * @param d - */ - void setGroupDepartment(GroupInterface group, DepartmentInterface d); + void setGroupMaxCores(GroupInterface g, int coreUnits); + + void setGroupMinCores(GroupInterface g, int coreUnits); + + void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits); + + void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits); + + void setGroupMaxGpus(GroupInterface g, int gpuUnits); + + void setGroupMinGpus(GroupInterface g, int gpuUnits); + + void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits); + + void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits); + + void setGroupDefaultJobPriority(GroupInterface g, int priority); + + /** + * Return the group from its unique ID + * + * @param id + * @return + */ + GroupInterface getGroup(String id); + + /** + * Return the root group for the specified show. + * + * @param s + * @return + */ + GroupDetail getRootGroupDetail(ShowInterface s); + + /** + * Return the GroupDetail by job. + * + * @param j + * @return + */ + GroupDetail getGroupDetail(JobInterface j); + + /** + * Return a GroupDetail from its unique ID + * + * @param id + * @return + */ + GroupDetail getGroupDetail(String id); + + void setGroupParent(GroupInterface group, GroupInterface newParent); + + void deleteGroup(GroupInterface group); + + void createGroup(GroupDetail group, GroupInterface parent); + + /** + * Re-parent a job to the specified group. + * + * @param job + * @param group + * @param inherit + */ + void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit); + + /** + * Parents a list of groups to the specified group + * + * @param group + * @param groups + */ + void reparentGroups(GroupInterface group, List groups); + + /** + * Re-parent a list of unique group IDs. + * + * @param group + * @param groups + */ + void reparentGroupIds(GroupInterface group, List groups); + + /** + * Sets the group's department all all jobs in that group to the new department. + * + * @param group + * @param d + */ + void setGroupDepartment(GroupInterface group, DepartmentInterface d); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java index 89fc25193..c06e09dfb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -38,183 +34,181 @@ @Transactional public class GroupManagerService implements GroupManager { - private GroupDao groupDao; - - private JobDao jobDao; - - private DepartmentDao departmentDao; - - private DepartmentManager departmentManager; - - @Override - public void setGroupDefaultJobPriority(GroupInterface g, int priority) { - groupDao.updateDefaultJobPriority(g, priority); - jobDao.updatePriority(g, priority); - } - - @Override - public void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits) { - groupDao.updateDefaultJobMaxCores(g,coreUnits); - if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMaxCores(g,coreUnits); - } - } - - @Override - public void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits) { - groupDao.updateDefaultJobMinCores(g,coreUnits); - if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMinCores(g,coreUnits); - } - } - - @Override - public void setGroupMaxCores(GroupInterface g, int coreUnits) { - groupDao.updateMaxCores(g,coreUnits); - } - - @Override - public void setGroupMinCores(GroupInterface g, int coreUnits) { - groupDao.updateMinCores(g,coreUnits); - } - - @Override - public void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits) { - groupDao.updateDefaultJobMaxGpus(g,gpuUnits); - if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMaxGpus(g, gpuUnits); - } - } - - @Override - public void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits) { - groupDao.updateDefaultJobMinGpus(g,gpuUnits); - if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMinGpus(g, gpuUnits); - } - } - - @Override - public void setGroupMaxGpus(GroupInterface g, int gpuUnits) { - groupDao.updateMaxGpus(g, gpuUnits); - } - - @Override - public void setGroupMinGpus(GroupInterface g, int gpuUnits) { - groupDao.updateMinGpus(g, gpuUnits); - } - - @Override - public void setGroupParent(GroupInterface group, GroupInterface newParent) { - groupDao.updateGroupParent(group, newParent); - } - - @Override - public void deleteGroup(GroupInterface group) { - groupDao.deleteGroup(group); - } - - @Override - public void createGroup(GroupDetail group, GroupInterface parent) { - DepartmentInterface d; - if (group.getDepartmentId() == null) { - d = departmentDao.getDefaultDepartment(); - group.deptId = d.getId(); - } - else { - d = departmentDao.getDepartment(group.getDepartmentId()); - } - groupDao.insertGroup(group, parent); - - if (!departmentManager.departmentConfigExists(group, d)) { - departmentManager.createDepartmentConfig(group, d); - } - } - - @Override - public void reparentGroups(GroupInterface group, List groups) { - for (GroupInterface g : groups) { - groupDao.updateGroupParent(g, group); - } - } - - @Override - public void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit) { - jobDao.updateParent(job, group, inherit); - } - - @Override - public void reparentGroupIds(GroupInterface group, List groups) { - reparentGroups(group, groupDao.getGroups(groups)); - } - - @Override - public void setGroupDepartment(GroupInterface group, 
DepartmentInterface dept) { - /* - * If this is the first time the show is using this department - * a department configuration is created. - */ - if (!departmentManager.departmentConfigExists(group, dept)) { - departmentManager.createDepartmentConfig(group, dept); - } - groupDao.updateDepartment(group, dept); - jobDao.updateDepartment(group, dept); - } - - @Override - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public GroupInterface getGroup(String id) { - return groupDao.getGroup(id); - } - - @Override - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public GroupDetail getGroupDetail(String id) { - return groupDao.getGroupDetail(id); - } - - @Override - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public GroupDetail getRootGroupDetail(ShowInterface s) { - return groupDao.getRootGroupDetail(s); - } - - @Override - @Transactional(propagation=Propagation.REQUIRED, readOnly=true) - public GroupDetail getGroupDetail(JobInterface j) { - return groupDao.getGroupDetail(j); - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } + private GroupDao groupDao; - public JobDao getJobDao() { - return jobDao; - } + private JobDao jobDao; - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + private DepartmentDao departmentDao; - public DepartmentDao getDepartmentDao() { - return departmentDao; - } + private DepartmentManager departmentManager; - public void setDepartmentDao(DepartmentDao departmentDao) { - this.departmentDao = departmentDao; - } + @Override + public void setGroupDefaultJobPriority(GroupInterface g, int priority) { + groupDao.updateDefaultJobPriority(g, priority); + jobDao.updatePriority(g, priority); + } - public DepartmentManager getDepartmentManager() { - return departmentManager; - } + @Override + public void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits) { + groupDao.updateDefaultJobMaxCores(g, coreUnits); + if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMaxCores(g, coreUnits); + } + } - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } + @Override + public void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits) { + groupDao.updateDefaultJobMinCores(g, coreUnits); + if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMinCores(g, coreUnits); + } + } + + @Override + public void setGroupMaxCores(GroupInterface g, int coreUnits) { + groupDao.updateMaxCores(g, coreUnits); + } + + @Override + public void setGroupMinCores(GroupInterface g, int coreUnits) { + groupDao.updateMinCores(g, coreUnits); + } + + @Override + public void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits) { + groupDao.updateDefaultJobMaxGpus(g, gpuUnits); + if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMaxGpus(g, gpuUnits); + } + } + + @Override + public void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits) { + groupDao.updateDefaultJobMinGpus(g, gpuUnits); + if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMinGpus(g, gpuUnits); + } + } + + @Override + public void setGroupMaxGpus(GroupInterface g, int gpuUnits) { + groupDao.updateMaxGpus(g, gpuUnits); + } + + @Override + public void setGroupMinGpus(GroupInterface g, int gpuUnits) { + groupDao.updateMinGpus(g, gpuUnits); + } + + @Override + public 
void setGroupParent(GroupInterface group, GroupInterface newParent) { + groupDao.updateGroupParent(group, newParent); + } + + @Override + public void deleteGroup(GroupInterface group) { + groupDao.deleteGroup(group); + } + + @Override + public void createGroup(GroupDetail group, GroupInterface parent) { + DepartmentInterface d; + if (group.getDepartmentId() == null) { + d = departmentDao.getDefaultDepartment(); + group.deptId = d.getId(); + } else { + d = departmentDao.getDepartment(group.getDepartmentId()); + } + groupDao.insertGroup(group, parent); + + if (!departmentManager.departmentConfigExists(group, d)) { + departmentManager.createDepartmentConfig(group, d); + } + } + + @Override + public void reparentGroups(GroupInterface group, List groups) { + for (GroupInterface g : groups) { + groupDao.updateGroupParent(g, group); + } + } + + @Override + public void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit) { + jobDao.updateParent(job, group, inherit); + } + + @Override + public void reparentGroupIds(GroupInterface group, List groups) { + reparentGroups(group, groupDao.getGroups(groups)); + } + + @Override + public void setGroupDepartment(GroupInterface group, DepartmentInterface dept) { + /* + * If this is the first time the show is using this department a department configuration is + * created. + */ + if (!departmentManager.departmentConfigExists(group, dept)) { + departmentManager.createDepartmentConfig(group, dept); + } + groupDao.updateDepartment(group, dept); + jobDao.updateDepartment(group, dept); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupInterface getGroup(String id) { + return groupDao.getGroup(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupDetail getGroupDetail(String id) { + return groupDao.getGroupDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupDetail getRootGroupDetail(ShowInterface s) { + return groupDao.getRootGroupDetail(s); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupDetail getGroupDetail(JobInterface j) { + return groupDao.getGroupDetail(j); + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public DepartmentDao getDepartmentDao() { + return departmentDao; + } + + public void setDepartmentDao(DepartmentDao departmentDao) { + this.departmentDao = departmentDao; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java index 8de83cf46..9aca062ac 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -25,19 +21,18 @@ public interface HistoricalManager { - /** - * Returns a list of jobs ready to be archived. - * - * @return List - */ - List getFinishedJobs(); + /** + * Returns a list of jobs ready to be archived. + * + * @return List + */ + List getFinishedJobs(); - /** - * Transfers data from the live to the historical tables. - * - * @param job - */ - void transferJob(JobInterface job); + /** + * Transfers data from the live to the historical tables. + * + * @param job + */ + void transferJob(JobInterface job); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java index 07c6186b3..8e60eebe3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.service; import java.util.List; @@ -33,35 +29,33 @@ @Transactional public class HistoricalManagerService implements HistoricalManager { - private HistoricalDao historicalDao; + private HistoricalDao historicalDao; - @Autowired - private Environment env; + @Autowired + private Environment env; - @Transactional(readOnly=true, isolation=Isolation.SERIALIZABLE) - public List getFinishedJobs() { - return historicalDao.getFinishedJobs( - env.getRequiredProperty("history.archive_jobs_cutoff_hours", Integer.class)); - } + @Transactional(readOnly = true, isolation = Isolation.SERIALIZABLE) + public List getFinishedJobs() { + return historicalDao.getFinishedJobs( + env.getRequiredProperty("history.archive_jobs_cutoff_hours", Integer.class)); + } - @Transactional - public void transferJob(JobInterface job) { - try { - historicalDao.transferJob(job); - } catch (Exception e) { - throw new HistoricalJobTransferException("failed to transfer job " + - job.getName() + " to historical table"); - } + @Transactional + public void transferJob(JobInterface job) { + try { + historicalDao.transferJob(job); + } catch (Exception e) { + throw new HistoricalJobTransferException( + "failed to transfer job " + job.getName() + " to historical table"); } + } - public HistoricalDao getHistoricalDao() { - return historicalDao; - } - - public void setHistoricalDao(HistoricalDao historicalDao) { - this.historicalDao = historicalDao; - } + public HistoricalDao getHistoricalDao() { + return historicalDao; + } + public void setHistoricalDao(HistoricalDao historicalDao) { + this.historicalDao = historicalDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java index 74c256729..c266ba1f8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.service; import java.util.List; @@ -27,29 +23,28 @@ import com.imageworks.spcue.JobInterface; public class HistoricalSupport { - private static final Logger logger = LogManager.getLogger(HistoricalSupport.class); - - private HistoricalManager historicalManager; - - public void archiveHistoricalJobData() { - logger.info("running historical job data transfer"); - List jobs = historicalManager.getFinishedJobs(); - for (JobInterface j: jobs) { - logger.info("transfering job " + j.getId() + "/" + j.getName()); - try { - historicalManager.transferJob(j); - } catch (Exception e) { - logger.warn("failed to transfer job, " + e); - } - } + private static final Logger logger = LogManager.getLogger(HistoricalSupport.class); + + private HistoricalManager historicalManager; + + public void archiveHistoricalJobData() { + logger.info("running historical job data transfer"); + List jobs = historicalManager.getFinishedJobs(); + for (JobInterface j : jobs) { + logger.info("transfering job " + j.getId() + "/" + j.getName()); + try { + historicalManager.transferJob(j); + } catch (Exception e) { + logger.warn("failed to transfer job, " + e); + } } + } - public HistoricalManager getHistoricalManager() { - return historicalManager; - } + public HistoricalManager getHistoricalManager() { + return historicalManager; + } - public void setHistoricalManager(HistoricalManager historicalManager) { - this.historicalManager = historicalManager; - } + public void setHistoricalManager(HistoricalManager historicalManager) { + this.historicalManager = historicalManager; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java index dae9bf552..e074169b3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.sql.Timestamp; @@ -42,211 +38,211 @@ public interface HostManager { - void rebootWhenIdle(HostInterface host); - void rebootNow(HostInterface host); - - /** - * Lock/unlock the specified host. - * - * @param host - * @param state - * @param source - */ - void setHostLock(HostInterface host, LockState state, Source source); - - /** - * Updates the state of a host. 
- * - * @param host HostInterface - * @param state HardwareState - */ - void setHostState(HostInterface host, HardwareState state); - - /** - * Updates the free temporary directory (mcp) of a host. - * - * @param host HostInterface - * @param freeTempDir Long - */ - void setHostFreeTempDir(HostInterface host, Long freeTempDir); - - DispatchHost createHost(HostReport report); - DispatchHost createHost(RenderHost host); - - /** - * Create a host and move it into the specified allocation. - * - * @param rhost - * @param alloc - * @return - */ - DispatchHost createHost(RenderHost rhost, AllocationEntity alloc); - - - HostInterface getHost(String id); - HostInterface findHost(String name); - - DispatchHost getDispatchHost(String id); - DispatchHost findDispatchHost(String name); - - HostEntity getHostDetail(HostInterface host); - HostEntity getHostDetail(String id); - HostEntity findHostDetail(String name); - - /** - * Returns true of the LockState is not Open. - * - * @param host - * @return - */ - boolean isLocked(HostInterface host); - - /** - * Set all host statistics. - * - * @param host - * @param totalMemory - * @param freeMemory - * @param totalSwap - * @param freeSwap - * @param totalMcp - * @param freeMcp - * @param totalGpuMemory - * @param freeGpuMemory - * @param load - * @param bootTime - * @param os - */ - void setHostStatistics(HostInterface host, - long totalMemory, long freeMemory, - long totalSwap, long freeSwap, - long totalMcp, long freeMcp, - long totalGpuMemory, long freeGpuMemory, - int load, Timestamp bootTime, String os); - - - void deleteHost(HostInterface host); - - AllocationInterface getDefaultAllocationDetail(); - - void setAllocation(HostInterface host, AllocationInterface alloc); - - void addTags(HostInterface host, String[] tags); - void removeTags(HostInterface host, String[] tags); - void renameTag(HostInterface host, String oldTag, String newTag); - - /** - * Verify that the given proc and frame IDs are assigned - * to each other in the database. - * - * @param procId - * @param frameId - * @return - */ - boolean verifyRunningProc(String procId, String frameId); - - /** - * Returns a list of VirtualProcs that match - * the specified criteria. - * - * @param r - * @return a list of VirtualProcs that match the criteria - */ - List findVirtualProcs(ProcSearchInterface r); - - List findVirtualProcs(FrameSearchInterface r); - VirtualProc findVirtualProc(FrameInterface frame); - List findVirtualProcs(HardwareState state); - - /** - * Returns a list of booked procs. When a proc is "booked", that means - * it plans on staying on the same job after it completes the current - * frame. If a proc is unbooked, it aways tries to find work to do - * on another job. - * - * @param r - * @return - */ - List findBookedVirtualProcs(ProcSearchInterface r); - - void unbookVirtualProcs(List procs); - void unbookProc(ProcInterface proc); - - /** - * Return the Virtual proc with the specified unique ID. - * - * @param id - * @return - */ - VirtualProc getVirtualProc(String id); - - /** - * Return true if the given host is in the Up state. Other - * states are Down, Rebooting, RebootWhenIdle, etc. Only hosts - * in the Up state should be booked or dispatched. - * - * @param host - * @return - */ - boolean isHostUp(HostInterface host); - - /** - * Return true if the proc is an orphan. An orphan has not - * had a ping in 5 minutes. - * - * @param proc - * @return - */ - boolean isOprhan(ProcInterface proc); - - /** - * Return the number of stranded cores on the host. 
- */ - int getStrandedCoreUnits(HostInterface h); - - /** - * Return the number of stranded cores on the host. - */ - int getStrandedGpuUnits(HostInterface h); - - /** - * Return true of the host prefers a particular show. - * - * @param host - * @return - */ - boolean isPreferShow(HostInterface host); - - /** - * Return a host's preferred show. - * - * @param host - * @return - */ - ShowInterface getPreferredShow(HostInterface host); - - /** - * Return all running procs for the given host. - * - * @param host - * @return - */ - List findVirtualProcs(HostInterface host); - - /** - * Return all running procs for the given LocalHostAssignment. - * - * @param l - * @return - */ - List findVirtualProcs(LocalHostAssignment l); - - /** - * Set the hosts available idle cores and memory. - * - * @param host - * @param report - */ - void setHostResources(DispatchHost host, HostReport report); + void rebootWhenIdle(HostInterface host); + + void rebootNow(HostInterface host); + + /** + * Lock/unlock the specified host. + * + * @param host + * @param state + * @param source + */ + void setHostLock(HostInterface host, LockState state, Source source); + + /** + * Updates the state of a host. + * + * @param host HostInterface + * @param state HardwareState + */ + void setHostState(HostInterface host, HardwareState state); + + /** + * Updates the free temporary directory (mcp) of a host. + * + * @param host HostInterface + * @param freeTempDir Long + */ + void setHostFreeTempDir(HostInterface host, Long freeTempDir); + + DispatchHost createHost(HostReport report); + + DispatchHost createHost(RenderHost host); + + /** + * Create a host and move it into the specified allocation. + * + * @param rhost + * @param alloc + * @return + */ + DispatchHost createHost(RenderHost rhost, AllocationEntity alloc); + + HostInterface getHost(String id); + + HostInterface findHost(String name); + + DispatchHost getDispatchHost(String id); + + DispatchHost findDispatchHost(String name); + + HostEntity getHostDetail(HostInterface host); + + HostEntity getHostDetail(String id); + + HostEntity findHostDetail(String name); + + /** + * Returns true of the LockState is not Open. + * + * @param host + * @return + */ + boolean isLocked(HostInterface host); + + /** + * Set all host statistics. + * + * @param host + * @param totalMemory + * @param freeMemory + * @param totalSwap + * @param freeSwap + * @param totalMcp + * @param freeMcp + * @param totalGpuMemory + * @param freeGpuMemory + * @param load + * @param bootTime + * @param os + */ + void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, long totalSwap, + long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, int load, + Timestamp bootTime, String os); + + void deleteHost(HostInterface host); + + AllocationInterface getDefaultAllocationDetail(); + + void setAllocation(HostInterface host, AllocationInterface alloc); + + void addTags(HostInterface host, String[] tags); + + void removeTags(HostInterface host, String[] tags); + + void renameTag(HostInterface host, String oldTag, String newTag); + + /** + * Verify that the given proc and frame IDs are assigned to each other in the database. + * + * @param procId + * @param frameId + * @return + */ + boolean verifyRunningProc(String procId, String frameId); + + /** + * Returns a list of VirtualProcs that match the specified criteria. 
+ * + * @param r + * @return a list of VirtualProcs that match the criteria + */ + List findVirtualProcs(ProcSearchInterface r); + + List findVirtualProcs(FrameSearchInterface r); + + VirtualProc findVirtualProc(FrameInterface frame); + + List findVirtualProcs(HardwareState state); + + /** + * Returns a list of booked procs. When a proc is "booked", that means it plans on staying on the + * same job after it completes the current frame. If a proc is unbooked, it aways tries to find + * work to do on another job. + * + * @param r + * @return + */ + List findBookedVirtualProcs(ProcSearchInterface r); + + void unbookVirtualProcs(List procs); + + void unbookProc(ProcInterface proc); + + /** + * Return the Virtual proc with the specified unique ID. + * + * @param id + * @return + */ + VirtualProc getVirtualProc(String id); + + /** + * Return true if the given host is in the Up state. Other states are Down, Rebooting, + * RebootWhenIdle, etc. Only hosts in the Up state should be booked or dispatched. + * + * @param host + * @return + */ + boolean isHostUp(HostInterface host); + + /** + * Return true if the proc is an orphan. An orphan has not had a ping in 5 minutes. + * + * @param proc + * @return + */ + boolean isOprhan(ProcInterface proc); + + /** + * Return the number of stranded cores on the host. + */ + int getStrandedCoreUnits(HostInterface h); + + /** + * Return the number of stranded cores on the host. + */ + int getStrandedGpuUnits(HostInterface h); + + /** + * Return true of the host prefers a particular show. + * + * @param host + * @return + */ + boolean isPreferShow(HostInterface host); + + /** + * Return a host's preferred show. + * + * @param host + * @return + */ + ShowInterface getPreferredShow(HostInterface host); + + /** + * Return all running procs for the given host. + * + * @param host + * @return + */ + List findVirtualProcs(HostInterface host); + + /** + * Return all running procs for the given LocalHostAssignment. + * + * @param l + * @return + */ + List findVirtualProcs(LocalHostAssignment l); + + /** + * Set the hosts available idle cores and memory. + * + * @param host + * @param report + */ + void setHostResources(DispatchHost host, HostReport report); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java index 6abb08090..8f5ce90d5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.sql.Timestamp; @@ -58,367 +54,357 @@ @Transactional public class HostManagerService implements HostManager { - private static final Logger logger = LogManager.getLogger(HostManagerService.class); - - private HostDao hostDao; - private RqdClient rqdClient; - private ProcDao procDao; - private ShowDao showDao; - private FacilityDao facilityDao; - private SubscriptionDao subscriptionDao; - private AllocationDao allocationDao; - - public HostManagerService() { } - - @Override - public void setHostLock(HostInterface host, LockState lock, Source source) { - hostDao.updateHostLock(host, lock, source); - rqdClient.setHostLock(host, lock); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isLocked(HostInterface host) { - return hostDao.isHostLocked(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isHostUp(HostInterface host) { - return hostDao.isHostUp(host); - } - - @Override - public void setHostState(HostInterface host, HardwareState state) { - hostDao.updateHostState(host, state); - } - - @Override - public void setHostFreeTempDir(HostInterface host, Long freeTempDir) { - hostDao.updateHostFreeTempDir(host, freeTempDir); - } - - public void rebootWhenIdle(HostInterface host) { - try { - hostDao.updateHostState(host, HardwareState.REBOOT_WHEN_IDLE); - rqdClient.rebootWhenIdle(host); - } - catch (RqdClientException e) { - logger.info("failed to contact host: " + host.getName() + " for reboot"); - } - } - - public void rebootNow(HostInterface host) { + private static final Logger logger = LogManager.getLogger(HostManagerService.class); + + private HostDao hostDao; + private RqdClient rqdClient; + private ProcDao procDao; + private ShowDao showDao; + private FacilityDao facilityDao; + private SubscriptionDao subscriptionDao; + private AllocationDao allocationDao; + + public HostManagerService() {} + + @Override + public void setHostLock(HostInterface host, LockState lock, Source source) { + hostDao.updateHostLock(host, lock, source); + rqdClient.setHostLock(host, lock); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isLocked(HostInterface host) { + return hostDao.isHostLocked(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isHostUp(HostInterface host) { + return hostDao.isHostUp(host); + } + + @Override + public void setHostState(HostInterface host, HardwareState state) { + hostDao.updateHostState(host, state); + } + + @Override + public void setHostFreeTempDir(HostInterface host, Long freeTempDir) { + hostDao.updateHostFreeTempDir(host, freeTempDir); + } + + public void rebootWhenIdle(HostInterface host) { + try { + hostDao.updateHostState(host, HardwareState.REBOOT_WHEN_IDLE); + rqdClient.rebootWhenIdle(host); + } catch (RqdClientException e) { + logger.info("failed to contact host: " + host.getName() + " for reboot"); + } + } + + public void rebootNow(HostInterface host) { + try { + hostDao.updateHostState(host, HardwareState.REBOOTING); + rqdClient.rebootNow(host); + } catch 
(RqdClientException e) { + logger.info("failed to contact host: " + host.getName() + " for reboot"); + hostDao.updateHostState(host, HardwareState.DOWN); + } + } + + @Override + public void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, + long totalSwap, long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, + long freeGpuMemory, int load, Timestamp bootTime, String os) { + + hostDao.updateHostStats(host, totalMemory, freeMemory, totalSwap, freeSwap, totalMcp, freeMcp, + totalGpuMemory, freeGpuMemory, load, bootTime, os); + } + + @Transactional(propagation = Propagation.SUPPORTS, readOnly = true) + public HostInterface findHost(String name) { + return hostDao.findHost(name); + } + + @Transactional(propagation = Propagation.SUPPORTS, readOnly = true) + public HostInterface getHost(String id) { + return hostDao.getHost(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public DispatchHost createHost(HostReport report) { + return createHost(report.getHost()); + } + + @Transactional(propagation = Propagation.REQUIRED) + public DispatchHost createHost(RenderHost rhost) { + // Find suitable allocation with facility and tags. + AllocationEntity alloc = null; + if (rhost.getTagsCount() > 0) { + String facility = rhost.getFacility(); + for (String tag : rhost.getTagsList()) { try { - hostDao.updateHostState(host, HardwareState.REBOOTING); - rqdClient.rebootNow(host); - } - catch (RqdClientException e) { - logger.info("failed to contact host: " + host.getName() + " for reboot"); - hostDao.updateHostState(host, HardwareState.DOWN); - } - } - - @Override - public void setHostStatistics(HostInterface host, - long totalMemory, long freeMemory, - long totalSwap, long freeSwap, - long totalMcp, long freeMcp, - long totalGpuMemory, long freeGpuMemory, - int load, Timestamp bootTime, - String os) { - - hostDao.updateHostStats(host, - totalMemory, freeMemory, - totalSwap, freeSwap, - totalMcp, freeMcp, - totalGpuMemory, freeGpuMemory, - load, bootTime, os); - } - - @Transactional(propagation = Propagation.SUPPORTS, readOnly=true) - public HostInterface findHost(String name) { - return hostDao.findHost(name); - } - - @Transactional(propagation = Propagation.SUPPORTS, readOnly=true) - public HostInterface getHost(String id) { - return hostDao.getHost(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public DispatchHost createHost(HostReport report) { - return createHost(report.getHost()); - } - - @Transactional(propagation = Propagation.REQUIRED) - public DispatchHost createHost(RenderHost rhost) { - // Find suitable allocation with facility and tags. - AllocationEntity alloc = null; - if (rhost.getTagsCount() > 0) { - String facility = rhost.getFacility(); - for (String tag : rhost.getTagsList()) { - try { - alloc = allocationDao.findAllocationEntity(facility, tag); - logger.info("set " + rhost.getName() + - " to the given allocation " + alloc.getName()); - break; - } - catch (EmptyResultDataAccessException e) { - // Allocation doesn't exist. ignore. 
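// Editor's note (illustrative annotation; not part of the reformatting patch): the reformatted
// createHost(RenderHost) above walks the host's reported tags and keeps the first allocation that
// matches (facility, tag); a lookup miss raises EmptyResultDataAccessException and is simply
// ignored, and the trailing null check in this method falls back to the default allocation when no
// tag matched. A minimal sketch of that selection order, assuming a hypothetical host reporting
// facility "spi" and tags ["desktop", "general"]:
//
//   allocationDao.findAllocationEntity("spi", "desktop"); // throws, ignored -- try the next tag
//   allocationDao.findAllocationEntity("spi", "general"); // returns a match -- loop breaks here
//   // alloc stays null only if every lookup missed; then getDefaultAllocationDetail() is used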
- } - } - } - if (alloc == null) { - alloc = getDefaultAllocationDetail(); - logger.info("set " + rhost.getName() + - " to the default allocation " + alloc.getName()); - } - return createHost(rhost, alloc); - } - - @Transactional(propagation = Propagation.REQUIRED) - public DispatchHost createHost(RenderHost rhost, AllocationEntity alloc) { - - hostDao.insertRenderHost(rhost, alloc, false); - DispatchHost host = hostDao.findDispatchHost(rhost.getName()); - - hostDao.tagHost(host, alloc.tag, HostTagType.ALLOC); - hostDao.tagHost(host, host.name, HostTagType.HOSTNAME); - - if (rhost.getTagsCount() > 0) { - for (String tag : rhost.getTagsList()) { - hostDao.tagHost(host, tag, HostTagType.MANUAL); - } - } - - // Don't tag anything with hardware yet, we don't watch new procs - // that report in to automatically start running frames. - - hostDao.recalcuateTags(host.id); - return host; - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DispatchHost findDispatchHost(String name) { - return hostDao.findDispatchHost(name); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public HostEntity findHostDetail(String name) { - return hostDao.findHostDetail(name); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DispatchHost getDispatchHost(String id) { - return hostDao.getDispatchHost(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public HostEntity getHostDetail(HostInterface host) { - return hostDao.getHostDetail(host); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public HostEntity getHostDetail(String id) { - return hostDao.getHostDetail(id); - } - - @Transactional(propagation = Propagation.SUPPORTS) - public AllocationEntity getDefaultAllocationDetail() { - return allocationDao.getDefaultAllocationEntity(); - } - - public void addTags(HostInterface host, String[] tags) { - for (String tag : tags) { - if (tag == null) { continue; } - if (tag.length() == 0) { continue; } - hostDao.tagHost(host, tag, HostTagType.MANUAL); - } - hostDao.recalcuateTags(host.getHostId()); - } - - public void removeTags(HostInterface host, String[] tags) { - for (String tag: tags) { - hostDao.removeTag(host, tag); - } - hostDao.recalcuateTags(host.getHostId()); - } - - public void renameTag(HostInterface host, String oldTag, String newTag) { - hostDao.renameTag(host, oldTag, newTag); - hostDao.recalcuateTags(host.getHostId()); - } - - public void setAllocation(HostInterface host, AllocationInterface alloc) { - - if (procDao.findVirtualProcs(host).size() > 0) { - throw new EntityModificationError("You cannot move hosts with " + - "running procs between allocations."); - } - - hostDao.lockForUpdate(host); - hostDao.updateHostSetAllocation(host, alloc); - hostDao.recalcuateTags(host.getHostId()); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public int getStrandedCoreUnits(HostInterface h) { - return hostDao.getStrandedCoreUnits(h); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public int getStrandedGpuUnits(HostInterface h) { - return hostDao.getStrandedGpus(h); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean verifyRunningProc(String procId, String frameId) { - return procDao.verifyRunningProc(procId, frameId); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findVirtualProcs(FrameSearchInterface 
request) { - return procDao.findVirtualProcs(request); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public VirtualProc findVirtualProc(FrameInterface frame) { - return procDao.findVirtualProc(frame); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findVirtualProcs(HardwareState state) { - return procDao.findVirtualProcs(state); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findVirtualProcs(LocalHostAssignment l) { - return procDao.findVirtualProcs(l); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findVirtualProcs(ProcSearchInterface r) { - return procDao.findVirtualProcs(r); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findVirtualProcs(HostInterface host) { - return procDao.findVirtualProcs(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findBookedVirtualProcs(ProcSearchInterface r) { - return procDao.findBookedVirtualProcs(r); - } - - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void unbookVirtualProcs(List procs) { - for (VirtualProc proc: procs) { - unbookProc(proc); + alloc = allocationDao.findAllocationEntity(facility, tag); + logger.info("set " + rhost.getName() + " to the given allocation " + alloc.getName()); + break; + } catch (EmptyResultDataAccessException e) { + // Allocation doesn't exist. ignore. } + } } - - @Transactional(propagation = Propagation.REQUIRED) - public void unbookProc(ProcInterface proc) { - procDao.unbookProc(proc); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void setHostResources(DispatchHost host, HostReport report) { - hostDao.updateHostResources(host, report); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public VirtualProc getVirtualProc(String id) { - return procDao.getVirtualProc(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isOprhan(ProcInterface proc) { - return procDao.isOrphan(proc); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isPreferShow(HostInterface host) { - return hostDao.isPreferShow(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ShowInterface getPreferredShow(HostInterface host) { - return showDao.getShowDetail(host); - } - - public void deleteHost(HostInterface host) { - hostDao.deleteHost(host); - } - - public AllocationDao getAllocationDao() { - return allocationDao; - } - - public void setAllocationDao(AllocationDao allocationDao) { - this.allocationDao = allocationDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } - - public FacilityDao getFacilityDao() { - return facilityDao; - } - - public void setFacilityDao(FacilityDao facilityDao) { - this.facilityDao = facilityDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public 
SubscriptionDao getSubscriptionDao() { - return subscriptionDao; - } - - public void setSubscriptionDao(SubscriptionDao subscriptionDao) { - this.subscriptionDao = subscriptionDao; + if (alloc == null) { + alloc = getDefaultAllocationDetail(); + logger.info("set " + rhost.getName() + " to the default allocation " + alloc.getName()); } + return createHost(rhost, alloc); + } + + @Transactional(propagation = Propagation.REQUIRED) + public DispatchHost createHost(RenderHost rhost, AllocationEntity alloc) { + + hostDao.insertRenderHost(rhost, alloc, false); + DispatchHost host = hostDao.findDispatchHost(rhost.getName()); + + hostDao.tagHost(host, alloc.tag, HostTagType.ALLOC); + hostDao.tagHost(host, host.name, HostTagType.HOSTNAME); + + if (rhost.getTagsCount() > 0) { + for (String tag : rhost.getTagsList()) { + hostDao.tagHost(host, tag, HostTagType.MANUAL); + } + } + + // Don't tag anything with hardware yet, we don't watch new procs + // that report in to automatically start running frames. + + hostDao.recalcuateTags(host.id); + return host; + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchHost findDispatchHost(String name) { + return hostDao.findDispatchHost(name); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public HostEntity findHostDetail(String name) { + return hostDao.findHostDetail(name); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchHost getDispatchHost(String id) { + return hostDao.getDispatchHost(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public HostEntity getHostDetail(HostInterface host) { + return hostDao.getHostDetail(host); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public HostEntity getHostDetail(String id) { + return hostDao.getHostDetail(id); + } + + @Transactional(propagation = Propagation.SUPPORTS) + public AllocationEntity getDefaultAllocationDetail() { + return allocationDao.getDefaultAllocationEntity(); + } + + public void addTags(HostInterface host, String[] tags) { + for (String tag : tags) { + if (tag == null) { + continue; + } + if (tag.length() == 0) { + continue; + } + hostDao.tagHost(host, tag, HostTagType.MANUAL); + } + hostDao.recalcuateTags(host.getHostId()); + } + + public void removeTags(HostInterface host, String[] tags) { + for (String tag : tags) { + hostDao.removeTag(host, tag); + } + hostDao.recalcuateTags(host.getHostId()); + } + + public void renameTag(HostInterface host, String oldTag, String newTag) { + hostDao.renameTag(host, oldTag, newTag); + hostDao.recalcuateTags(host.getHostId()); + } + + public void setAllocation(HostInterface host, AllocationInterface alloc) { + + if (procDao.findVirtualProcs(host).size() > 0) { + throw new EntityModificationError( + "You cannot move hosts with " + "running procs between allocations."); + } + + hostDao.lockForUpdate(host); + hostDao.updateHostSetAllocation(host, alloc); + hostDao.recalcuateTags(host.getHostId()); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public int getStrandedCoreUnits(HostInterface h) { + return hostDao.getStrandedCoreUnits(h); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public int getStrandedGpuUnits(HostInterface h) { + return hostDao.getStrandedGpus(h); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean verifyRunningProc(String procId, String 
frameId) { + return procDao.verifyRunningProc(procId, frameId); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(FrameSearchInterface request) { + return procDao.findVirtualProcs(request); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public VirtualProc findVirtualProc(FrameInterface frame) { + return procDao.findVirtualProc(frame); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(HardwareState state) { + return procDao.findVirtualProcs(state); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(LocalHostAssignment l) { + return procDao.findVirtualProcs(l); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(ProcSearchInterface r) { + return procDao.findVirtualProcs(r); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(HostInterface host) { + return procDao.findVirtualProcs(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findBookedVirtualProcs(ProcSearchInterface r) { + return procDao.findBookedVirtualProcs(r); + } + + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void unbookVirtualProcs(List procs) { + for (VirtualProc proc : procs) { + unbookProc(proc); + } + } + + @Transactional(propagation = Propagation.REQUIRED) + public void unbookProc(ProcInterface proc) { + procDao.unbookProc(proc); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void setHostResources(DispatchHost host, HostReport report) { + hostDao.updateHostResources(host, report); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public VirtualProc getVirtualProc(String id) { + return procDao.getVirtualProc(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isOprhan(ProcInterface proc) { + return procDao.isOrphan(proc); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isPreferShow(HostInterface host) { + return hostDao.isPreferShow(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ShowInterface getPreferredShow(HostInterface host) { + return showDao.getShowDetail(host); + } + + public void deleteHost(HostInterface host) { + hostDao.deleteHost(host); + } + + public AllocationDao getAllocationDao() { + return allocationDao; + } + + public void setAllocationDao(AllocationDao allocationDao) { + this.allocationDao = allocationDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public FacilityDao getFacilityDao() { + return facilityDao; + } + + public void setFacilityDao(FacilityDao facilityDao) { + this.facilityDao = facilityDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public SubscriptionDao 
getSubscriptionDao() { + return subscriptionDao; + } + + public void setSubscriptionDao(SubscriptionDao subscriptionDao) { + this.subscriptionDao = subscriptionDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java b/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java index ce231331b..1befa2aed 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.concurrent.LinkedBlockingQueue; @@ -40,68 +36,64 @@ import com.imageworks.spcue.util.CueExceptionUtil; public class JmsMover extends ThreadPoolExecutor { - private static final Logger logger = LogManager.getLogger(JmsMover.class); - private final Gson gson = new GsonBuilder().serializeNulls().create(); - - @Autowired - private Environment env; - private JmsTemplate template; - private Topic topic; - - private static final int THREAD_POOL_SIZE_INITIAL = 1; - private static final int THREAD_POOL_SIZE_MAX = 1; - private static final int QUEUE_SIZE_INITIAL = 1000; - - public JmsMover() { - super(THREAD_POOL_SIZE_INITIAL, THREAD_POOL_SIZE_MAX, 10 , TimeUnit.SECONDS, - new LinkedBlockingQueue(QUEUE_SIZE_INITIAL)); - } - - public void send(Object m) { - if (env.getRequiredProperty("messaging.enabled", Boolean.class)) { + private static final Logger logger = LogManager.getLogger(JmsMover.class); + private final Gson gson = new GsonBuilder().serializeNulls().create(); + + @Autowired + private Environment env; + private JmsTemplate template; + private Topic topic; + + private static final int THREAD_POOL_SIZE_INITIAL = 1; + private static final int THREAD_POOL_SIZE_MAX = 1; + private static final int QUEUE_SIZE_INITIAL = 1000; + + public JmsMover() { + super(THREAD_POOL_SIZE_INITIAL, THREAD_POOL_SIZE_MAX, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue(QUEUE_SIZE_INITIAL)); + } + + public void send(Object m) { + if (env.getRequiredProperty("messaging.enabled", Boolean.class)) { + try { + execute(new Runnable() { + @Override + public void run() { try { - execute(new Runnable() { - @Override - public void run() { - try { - template.send(topic, new MessageCreator() { - @Override - public Message createMessage(Session session) - throws javax.jms.JMSException { - return 
session.createTextMessage(gson.toJson(m)); - } - }); - } catch (JmsException e) { - logger.warn("Failed to send JMS message"); - CueExceptionUtil.logStackTrace( - "JmsProducer " + this.getClass().toString() + - " caught error ", e); - } - } - }); - } catch (RejectedExecutionException e) { - logger.warn("Outgoing JMS message queue is full!"); - CueExceptionUtil.logStackTrace( - "JmsProducer " + this.getClass().toString() + - " caught error ", e); + template.send(topic, new MessageCreator() { + @Override + public Message createMessage(Session session) throws javax.jms.JMSException { + return session.createTextMessage(gson.toJson(m)); + } + }); + } catch (JmsException e) { + logger.warn("Failed to send JMS message"); + CueExceptionUtil + .logStackTrace("JmsProducer " + this.getClass().toString() + " caught error ", e); } - } + } + }); + } catch (RejectedExecutionException e) { + logger.warn("Outgoing JMS message queue is full!"); + CueExceptionUtil + .logStackTrace("JmsProducer " + this.getClass().toString() + " caught error ", e); + } } + } - public JmsTemplate getTemplate() { - return template; - } + public JmsTemplate getTemplate() { + return template; + } - public void setTemplate(JmsTemplate template) { - this.template = template; - } + public void setTemplate(JmsTemplate template) { + this.template = template; + } - public Topic getTopic() { - return topic; - } + public Topic getTopic() { + return topic; + } - public void setTopic(Topic topic) { - this.topic = topic; - } + public void setTopic(Topic topic) { + this.topic = topic; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java index f46616115..7b2f09016 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.io.File; @@ -44,197 +40,188 @@ * Job launching functions. 
*/ public class JobLauncher implements ApplicationContextAware { - private static final Logger logger = LogManager.getLogger(JobLauncher.class); - private ApplicationContext context; - - private JobManager jobManager; - private DepartmentManager departmentManager; - private AdminManager adminManager; - private ThreadPoolTaskExecutor launchQueue; - private EmailSupport emailSupport; - private JmsMover jmsMover; - private LocalBookingSupport localBookingSupport; - - /** - * When true, disables log path creation and - * proc points sync. - */ - public volatile boolean testMode = false; - - @Override - public void setApplicationContext(ApplicationContext context) - throws BeansException { - this.context = context; - } - - public JobSpec parse(String xml) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(xml); - return spec; - } - - public JobSpec parse(File file) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(file); - return spec; - } - - public void launch(String xml) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(xml); - launch(spec); - } - - public void launch(File file) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(file); - launch(spec); - } - - public void launch(final JobSpec spec) { - - verifyJobSpec(spec); - - try { - jobManager.launchJobSpec(spec); - - for (BuildableJob job: spec.getJobs()) { - /* - * If isLocal is set, need to create local host assignment. - */ - JobDetail d = job.detail; - if (d.isLocal) { - logger.info(d.localHostName + " will do local dispatch. " + d.getJobId() + " " + d.localHostName); - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setJobId(d.getJobId()); - lha.setThreads(d.localThreadNumber); - lha.setMaxCoreUnits(d.localMaxCores * 100); - lha.setMaxMemory(d.localMaxMemory); - lha.setMaxGpuUnits(d.localMaxGpus); - lha.setMaxGpuMemory(d.localMaxGpuMemory); - lha.setType(RenderPartitionType.JOB_PARTITION); - - try { - localBookingSupport.bookLocal(d, d.localHostName, d.user, lha); - } - catch (DataIntegrityViolationException e) { - logger.info(d.name + " failed to create host local assignment."); - } - } - } - - /* - * This has to happen outside of the job launching transaction - * or else it can lock up booking because it updates the - * job_resource table. It can take quite some time to launch - * a job with dependencies, so the transaction should not - * touch any rows that are currently in the "live" data set. - */ - if (!testMode) { - Set depts = new HashSet(); - for (BuildableJob job: spec.getJobs()) { - JobDetail d = jobManager.getJobDetail(job.detail.id); - jmsMover.send(d); - if (departmentManager.isManaged(d)) { - if (!depts.contains(d.deptId)) { - departmentManager.syncJobsWithTask(d); - depts.add(d.deptId); - } - } - } - } - } catch (Exception e) { - // Catch anything and email the user a report as to - // why the job launch failed. - emailSupport.reportLaunchError(spec, e); + private static final Logger logger = LogManager.getLogger(JobLauncher.class); + private ApplicationContext context; + + private JobManager jobManager; + private DepartmentManager departmentManager; + private AdminManager adminManager; + private ThreadPoolTaskExecutor launchQueue; + private EmailSupport emailSupport; + private JmsMover jmsMover; + private LocalBookingSupport localBookingSupport; + + /** + * When true, disables log path creation and proc points sync. 
+ */ + public volatile boolean testMode = false; + + @Override + public void setApplicationContext(ApplicationContext context) throws BeansException { + this.context = context; + } + + public JobSpec parse(String xml) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(xml); + return spec; + } + + public JobSpec parse(File file) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(file); + return spec; + } + + public void launch(String xml) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(xml); + launch(spec); + } + + public void launch(File file) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(file); + launch(spec); + } + + public void launch(final JobSpec spec) { + + verifyJobSpec(spec); + + try { + jobManager.launchJobSpec(spec); + + for (BuildableJob job : spec.getJobs()) { + /* + * If isLocal is set, need to create local host assignment. + */ + JobDetail d = job.detail; + if (d.isLocal) { + logger.info( + d.localHostName + " will do local dispatch. " + d.getJobId() + " " + d.localHostName); + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setJobId(d.getJobId()); + lha.setThreads(d.localThreadNumber); + lha.setMaxCoreUnits(d.localMaxCores * 100); + lha.setMaxMemory(d.localMaxMemory); + lha.setMaxGpuUnits(d.localMaxGpus); + lha.setMaxGpuMemory(d.localMaxGpuMemory); + lha.setType(RenderPartitionType.JOB_PARTITION); + + try { + localBookingSupport.bookLocal(d, d.localHostName, d.user, lha); + } catch (DataIntegrityViolationException e) { + logger.info(d.name + " failed to create host local assignment."); + } } - } - - public void verifyJobSpec(JobSpec spec) { - - for (BuildableJob job: spec.getJobs()) { - if (jobManager.isJobPending(job.detail.name)) { - throw new EntityCreationError("The job " + job.detail.name - + " is already pending"); - } - } - - try { - ShowEntity s = adminManager.findShowEntity(spec.getShow()); - if (!s.active) { - throw new EntityCreationError("The " + spec.getShow() + - " show has been deactivated. Please contact " + - "administrator of your OpenCue deployment to reactivate " + - "this show."); + } + + /* + * This has to happen outside of the job launching transaction or else it can lock up booking + * because it updates the job_resource table. It can take quite some time to launch a job with + * dependencies, so the transaction should not touch any rows that are currently in the "live" + * data set. + */ + if (!testMode) { + Set depts = new HashSet(); + for (BuildableJob job : spec.getJobs()) { + JobDetail d = jobManager.getJobDetail(job.detail.id); + jmsMover.send(d); + if (departmentManager.isManaged(d)) { + if (!depts.contains(d.deptId)) { + departmentManager.syncJobsWithTask(d); + depts.add(d.deptId); } + } } - catch (EmptyResultDataAccessException e) { - throw new EntityCreationError("The " + spec.getShow() + - " does not exist. 
Please contact " + - "administrator of your OpenCue deployment to have this show " + - "created."); - } - } - - public void queueAndLaunch(final JobSpec spec) { - verifyJobSpec(spec); - launchQueue.execute(new DispatchLaunchJob(spec, this)); - } - - public EmailSupport getEmailSupport() { - return emailSupport; - } - - public void setEmailSupport(EmailSupport emailSupport) { - this.emailSupport = emailSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public ThreadPoolTaskExecutor getLaunchQueue() { - return launchQueue; - } - - public void setLaunchQueue(ThreadPoolTaskExecutor launchQueue) { - this.launchQueue = launchQueue; - } - - public JmsMover getJmsMover() { - return jmsMover; - } - - public void setJmsMover(JmsMover jmsMover) { - this.jmsMover = jmsMover; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } + } + } catch (Exception e) { + // Catch anything and email the user a report as to + // why the job launch failed. + emailSupport.reportLaunchError(spec, e); + } + } + + public void verifyJobSpec(JobSpec spec) { + + for (BuildableJob job : spec.getJobs()) { + if (jobManager.isJobPending(job.detail.name)) { + throw new EntityCreationError("The job " + job.detail.name + " is already pending"); + } + } + + try { + ShowEntity s = adminManager.findShowEntity(spec.getShow()); + if (!s.active) { + throw new EntityCreationError( + "The " + spec.getShow() + " show has been deactivated. Please contact " + + "administrator of your OpenCue deployment to reactivate " + "this show."); + } + } catch (EmptyResultDataAccessException e) { + throw new EntityCreationError("The " + spec.getShow() + " does not exist. 
Please contact " + + "administrator of your OpenCue deployment to have this show " + "created."); + } + } + + public void queueAndLaunch(final JobSpec spec) { + verifyJobSpec(spec); + launchQueue.execute(new DispatchLaunchJob(spec, this)); + } + + public EmailSupport getEmailSupport() { + return emailSupport; + } + + public void setEmailSupport(EmailSupport emailSupport) { + this.emailSupport = emailSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public ThreadPoolTaskExecutor getLaunchQueue() { + return launchQueue; + } + + public void setLaunchQueue(ThreadPoolTaskExecutor launchQueue) { + this.launchQueue = launchQueue; + } + + public JmsMover getJmsMover() { + return jmsMover; + } + + public void setJmsMover(JmsMover jmsMover) { + this.jmsMover = jmsMover; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java index 0000ca454..077a08638 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -42,469 +38,456 @@ import com.imageworks.spcue.util.FrameSet; /** - * JobManager pretty much handles all job management functions. From launching - * killing jobs, to managing the layers, frames, etc within jobs. + * JobManager pretty much handles all job management functions. From launching killing jobs, to + * managing the layers, frames, etc within jobs. 
*/ public interface JobManager { - /** - * Pause/unpause a job - * - * @param job - * @param paused - */ - void setJobPaused(JobInterface job, boolean paused); - - /** - * - * @param id - * @return - */ - public DispatchJob getDispatchJob(String id); - - /** - * - * @param id - * @return - */ - public DispatchFrame getDispatchFrame(String id); - - /** - * Returns true if there is a pending job with the specifed - * name on the cue. - * - * @param name - * @return - */ - boolean isJobPending(String name); - - /** - * Returns true if the job has no more frames that - * can possibly be dispatched. - * - * @return - */ - boolean isJobComplete(JobInterface job); - - /** - * Returns true if the layer is complete. - * - * @param layer - * @return - */ - boolean isLayerComplete(LayerInterface layer); - - /** - * Launches a job spec. - * - * @param spec - */ - void launchJobSpec(JobSpec spec); - - /** - * Creates a new job entry - * - * @param BuildableJob job - * @return JobDetail - */ - JobDetail createJob(BuildableJob job); - - /** - * Removes an existing job entry. The job must be in the - * Finished state before it can be removed. - * - * @param JobDetail job - */ - void removeJob(JobInterface job); - - /** - * Shutting down a job will signal RQD to kill all frames - * and drop all dependencies for specified job. Job is - * put into Shutdown state which should be commited - * before any other operations are done on the job. - * When shutdown is complete, the job shoud be marked Finished. - * - * @param JobDetail job - */ - boolean shutdownJob(JobInterface job); - - /** - * Finds and active job by name. - * - * @param String name - * @return JobDetail - */ - JobDetail findJobDetail(String name); - - /** - * Finds and active job by name. - * - * @param String name - * @return JobDetail - */ - JobInterface findJob(String name); - - /** - * Gets an active job by ID. - * - * @param String id - * @return JobDetail - */ - JobDetail getJobDetail(String id); - - /** - * Gets a job by unique id - * - * @param id - * @return - */ - JobInterface getJob(String id); - - /** - * - * @param id - * @return LayerDetail - */ - LayerDetail getLayerDetail(String id); - - /** - * Return a layer by its unique ID. - * - * @param id - * @return LayerDetail - */ - LayerInterface getLayer(String id); - - /** - * - * @param id - * @return FrameDetail - */ - FrameDetail getFrameDetail(String id); - - /** - * Return a frame with the given ID. - * - * @param id - * @return - */ - FrameInterface getFrame(String id); - - /** - * Marks a specific frame as waiting, setting its dependency - * count to 0 in the process even though it has active - * dependencies. - * - * @param frame - */ - public void markFrameAsWaiting(FrameInterface frame); - - /** - * Marks a specific frame as Depend if the frame has - * active dependencies. This will pretty much undo - * a markFrameAsWaiting. If the frame has no active - * depends this call should have no effect. - * - * @param frame - */ - public void markFrameAsDepend(FrameInterface frame); - - /** - * Return the result of the given FrameSearch. - * - * @param job - * @param r - * @return - */ - public List findFrames(FrameSearchInterface r); - - /** - * Updates specified frame to new state. - * - * @param frame - * @param state - */ - public void updateFrameState(FrameInterface frame, FrameState state); - - /** - * Updates specified job to new state. 
- * - * @param job - * @param state - */ - public void updateJobState(JobInterface job, JobState state); - - - /** - * Reorders the specified layer. - * - * @param job - * @param frameSet - */ - public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order); - - /** - * - * @param layer - * @param frameSet - */ - public void staggerLayer(LayerInterface layer, String range, int stagger); - - /** - * Returns all of the layers for the specified job - * - * @param job - * @return - */ - public List getLayers(JobInterface job); - - /** - * Returns all of the layers for the specified job - * - * @param job - * @return - */ - public List getLayerDetails(JobInterface job); - - /** - * Creates the job log directory. The JobDetail object - * must have the logDir property populated. - * - * @param newJob - */ - public void createJobLogDirectory(JobDetail newJob); - - /** - * Optimizes layer settings based on the specified maxRss - * and run time. - * - * @param layer - * @param maxRss - * @param runTime - */ - void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime); - - /** - * Return true if the given job is booked greater than min cores. - * - * @param job - * @return - */ - boolean isOverMinCores(JobInterface job); - - /** - * Increase the layer memory requirement to given KB value. - * - * @param layer - * @param memKb - */ - void increaseLayerMemoryRequirement(LayerInterface layer, long memKb); - - /** - * Appends a tag to a layer's existing tags. - * - * @param layer - * @param tag - */ - void appendLayerTag(LayerInterface layer, String tag); - - /** - * Replace all existing tags with the specified tag. - * - * @param layer - * @param tag - */ - void setLayerTag(LayerInterface layer, String tag); - - /** - * Return true if the given layer is threadable. - * - * @param layer - * @return - */ - boolean isLayerThreadable(LayerInterface layer); - - /** - * Enable or disable the layer memory optimizer. - */ - void enableMemoryOptimizer(LayerInterface layer, boolean state); - - /** - * Return the frame for the given layer and frame number. - * - * @param layer - * @param number - * @return - */ - FrameInterface findFrame(LayerInterface layer, int number); - - /** - * - * @param job - * @return - */ - FrameDetail findLongestFrame(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameDetail findShortestFrame(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameStateTotals getFrameStateTotals(JobInterface job); - - /** - * - * @param job - * @return - */ - ExecutionSummary getExecutionSummary(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameDetail findHighestMemoryFrame(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameDetail findLowestMemoryFrame(JobInterface job); - - /** - * Return the frame state totals by layer. - * - * @param layer - * @return - */ - FrameStateTotals getFrameStateTotals(LayerInterface layer); - - /** - * Return the execution summary by layer. - * - * @param layer - * @return - */ - ExecutionSummary getExecutionSummary(LayerInterface layer); - - /** - * Update the checkpoint state for the given frame. - * - * @param frame - * @param state - */ - void updateCheckpointState(FrameInterface frame, CheckpointState state); - - /** - * Return a list of frames that failed to checkpoint within - * the given checkpoint point. - * - * @param cutoffTimeMs - * @return - */ - List getStaleCheckpoints(int cutoffTimeSec); - - /** - * Return a list of registered layer outputs. 
- * - * @param layer - * @return - */ - List getLayerOutputs(LayerInterface layer); - - /** - * Register layer output. - * - * @param layer - * @return - */ - void registerLayerOutput(LayerInterface layer, String filespec); - - /** - * Return thread stats for the given layer. - * - * @param layer - * @return - */ - List getThreadStats(LayerInterface layer); - - /** - * Update the max core value for the given layer. - * - * @param layer - * @param coreUnits - */ - void setLayerMaxCores(LayerInterface layer, int coreUnits); - - /** - * Update the min core value for the given layer. - * - * @param layer - * @param coreUnits - */ - void setLayerMinCores(LayerInterface layer, int coreUnits); - - /** - * Update the max gpu value for the given layer. - * - * @param layer - * @param gpuUnits - */ - void setLayerMaxGpus(LayerInterface layer, int gpuUnits); - - /** - * Update the min gpu value for the given layer. - * - * @param layer - * @param gpuUnits - */ - void setLayerMinGpus(LayerInterface layer, int gpuUnits); - - /** - * Add a limit to the given layer. - * - * @param layer - * @param limitId - */ - void addLayerLimit(LayerInterface layer, String limitId); - - /** - * Remove a limit from the given layer. - * - * @param layer - * @param limitId - */ - void dropLayerLimit(LayerInterface layer, String limitId); - - /** - * Return a list of limits for the given layer. - * - * @param layer - */ - List getLayerLimits(LayerInterface layer); - - /** - * Update email(s) of subscribers for job - * - * @param job - * @param email - */ - void updateEmail(JobInterface job, String email); - - /** - * Return a list of limits for the given layer. - * - * @param job - */ - String getEmail(JobInterface job); + /** + * Pause/unpause a job + * + * @param job + * @param paused + */ + void setJobPaused(JobInterface job, boolean paused); + + /** + * + * @param id + * @return + */ + public DispatchJob getDispatchJob(String id); + + /** + * + * @param id + * @return + */ + public DispatchFrame getDispatchFrame(String id); + + /** + * Returns true if there is a pending job with the specifed name on the cue. + * + * @param name + * @return + */ + boolean isJobPending(String name); + + /** + * Returns true if the job has no more frames that can possibly be dispatched. + * + * @return + */ + boolean isJobComplete(JobInterface job); + + /** + * Returns true if the layer is complete. + * + * @param layer + * @return + */ + boolean isLayerComplete(LayerInterface layer); + + /** + * Launches a job spec. + * + * @param spec + */ + void launchJobSpec(JobSpec spec); + + /** + * Creates a new job entry + * + * @param BuildableJob job + * @return JobDetail + */ + JobDetail createJob(BuildableJob job); + + /** + * Removes an existing job entry. The job must be in the Finished state before it can be removed. + * + * @param JobDetail job + */ + void removeJob(JobInterface job); + + /** + * Shutting down a job will signal RQD to kill all frames and drop all dependencies for specified + * job. Job is put into Shutdown state which should be commited before any other operations are + * done on the job. When shutdown is complete, the job shoud be marked Finished. + * + * @param JobDetail job + */ + boolean shutdownJob(JobInterface job); + + /** + * Finds and active job by name. + * + * @param String name + * @return JobDetail + */ + JobDetail findJobDetail(String name); + + /** + * Finds and active job by name. 
+ * + * @param String name + * @return JobDetail + */ + JobInterface findJob(String name); + + /** + * Gets an active job by ID. + * + * @param String id + * @return JobDetail + */ + JobDetail getJobDetail(String id); + + /** + * Gets a job by unique id + * + * @param id + * @return + */ + JobInterface getJob(String id); + + /** + * + * @param id + * @return LayerDetail + */ + LayerDetail getLayerDetail(String id); + + /** + * Return a layer by its unique ID. + * + * @param id + * @return LayerDetail + */ + LayerInterface getLayer(String id); + + /** + * + * @param id + * @return FrameDetail + */ + FrameDetail getFrameDetail(String id); + + /** + * Return a frame with the given ID. + * + * @param id + * @return + */ + FrameInterface getFrame(String id); + + /** + * Marks a specific frame as waiting, setting its dependency count to 0 in the process even though + * it has active dependencies. + * + * @param frame + */ + public void markFrameAsWaiting(FrameInterface frame); + + /** + * Marks a specific frame as Depend if the frame has active dependencies. This will pretty much + * undo a markFrameAsWaiting. If the frame has no active depends this call should have no effect. + * + * @param frame + */ + public void markFrameAsDepend(FrameInterface frame); + + /** + * Return the result of the given FrameSearch. + * + * @param job + * @param r + * @return + */ + public List findFrames(FrameSearchInterface r); + + /** + * Updates specified frame to new state. + * + * @param frame + * @param state + */ + public void updateFrameState(FrameInterface frame, FrameState state); + + /** + * Updates specified job to new state. + * + * @param job + * @param state + */ + public void updateJobState(JobInterface job, JobState state); + + /** + * Reorders the specified layer. + * + * @param job + * @param frameSet + */ + public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order); + + /** + * + * @param layer + * @param frameSet + */ + public void staggerLayer(LayerInterface layer, String range, int stagger); + + /** + * Returns all of the layers for the specified job + * + * @param job + * @return + */ + public List getLayers(JobInterface job); + + /** + * Returns all of the layers for the specified job + * + * @param job + * @return + */ + public List getLayerDetails(JobInterface job); + + /** + * Creates the job log directory. The JobDetail object must have the logDir property populated. + * + * @param newJob + */ + public void createJobLogDirectory(JobDetail newJob); + + /** + * Optimizes layer settings based on the specified maxRss and run time. + * + * @param layer + * @param maxRss + * @param runTime + */ + void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime); + + /** + * Return true if the given job is booked greater than min cores. + * + * @param job + * @return + */ + boolean isOverMinCores(JobInterface job); + + /** + * Increase the layer memory requirement to given KB value. + * + * @param layer + * @param memKb + */ + void increaseLayerMemoryRequirement(LayerInterface layer, long memKb); + + /** + * Appends a tag to a layer's existing tags. + * + * @param layer + * @param tag + */ + void appendLayerTag(LayerInterface layer, String tag); + + /** + * Replace all existing tags with the specified tag. + * + * @param layer + * @param tag + */ + void setLayerTag(LayerInterface layer, String tag); + + /** + * Return true if the given layer is threadable. 
+ * + * @param layer + * @return + */ + boolean isLayerThreadable(LayerInterface layer); + + /** + * Enable or disable the layer memory optimizer. + */ + void enableMemoryOptimizer(LayerInterface layer, boolean state); + + /** + * Return the frame for the given layer and frame number. + * + * @param layer + * @param number + * @return + */ + FrameInterface findFrame(LayerInterface layer, int number); + + /** + * + * @param job + * @return + */ + FrameDetail findLongestFrame(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameDetail findShortestFrame(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameStateTotals getFrameStateTotals(JobInterface job); + + /** + * + * @param job + * @return + */ + ExecutionSummary getExecutionSummary(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameDetail findHighestMemoryFrame(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameDetail findLowestMemoryFrame(JobInterface job); + + /** + * Return the frame state totals by layer. + * + * @param layer + * @return + */ + FrameStateTotals getFrameStateTotals(LayerInterface layer); + + /** + * Return the execution summary by layer. + * + * @param layer + * @return + */ + ExecutionSummary getExecutionSummary(LayerInterface layer); + + /** + * Update the checkpoint state for the given frame. + * + * @param frame + * @param state + */ + void updateCheckpointState(FrameInterface frame, CheckpointState state); + + /** + * Return a list of frames that failed to checkpoint within the given checkpoint point. + * + * @param cutoffTimeMs + * @return + */ + List getStaleCheckpoints(int cutoffTimeSec); + + /** + * Return a list of registered layer outputs. + * + * @param layer + * @return + */ + List getLayerOutputs(LayerInterface layer); + + /** + * Register layer output. + * + * @param layer + * @return + */ + void registerLayerOutput(LayerInterface layer, String filespec); + + /** + * Return thread stats for the given layer. + * + * @param layer + * @return + */ + List getThreadStats(LayerInterface layer); + + /** + * Update the max core value for the given layer. + * + * @param layer + * @param coreUnits + */ + void setLayerMaxCores(LayerInterface layer, int coreUnits); + + /** + * Update the min core value for the given layer. + * + * @param layer + * @param coreUnits + */ + void setLayerMinCores(LayerInterface layer, int coreUnits); + + /** + * Update the max gpu value for the given layer. + * + * @param layer + * @param gpuUnits + */ + void setLayerMaxGpus(LayerInterface layer, int gpuUnits); + + /** + * Update the min gpu value for the given layer. + * + * @param layer + * @param gpuUnits + */ + void setLayerMinGpus(LayerInterface layer, int gpuUnits); + + /** + * Add a limit to the given layer. + * + * @param layer + * @param limitId + */ + void addLayerLimit(LayerInterface layer, String limitId); + + /** + * Remove a limit from the given layer. + * + * @param layer + * @param limitId + */ + void dropLayerLimit(LayerInterface layer, String limitId); + + /** + * Return a list of limits for the given layer. + * + * @param layer + */ + List getLayerLimits(LayerInterface layer); + + /** + * Update email(s) of subscribers for job + * + * @param job + * @param email + */ + void updateEmail(JobInterface job, String email); + + /** + * Return a list of limits for the given layer. 
+ * + * @param job + */ + String getEmail(JobInterface job); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java index 2c9c14425..becadc0ec 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -71,600 +67,589 @@ @Transactional public class JobManagerService implements JobManager { - private static final Logger logger = LogManager.getLogger(JobManagerService.class); - - private JobDao jobDao; - private ShowDao showDao; - private FrameDao frameDao; - private LayerDao layerDao; - private LimitDao limitDao; - private HostDao hostDao; - private DependManager dependManager; - private FilterManager filterManager; - private GroupDao groupDao; - private FacilityDao facilityDao; - private JobLogUtil jobLogUtil; - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isJobComplete(JobInterface job) { - return jobDao.isJobComplete(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isLayerComplete(LayerInterface layer) { - return layerDao.isLayerComplete(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isLayerThreadable(LayerInterface layer) { - return layerDao.isThreadable(layer); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isJobPending(String name) { - return jobDao.exists(name); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void removeJob(JobInterface job) { - jobDao.deleteJob(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public JobDetail getJobDetail(String id) { - return jobDao.getJobDetail(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public JobInterface getJob(String id) { - return jobDao.getJob(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public JobDetail findJobDetail(String name) { - return jobDao.findJobDetail(name); - } - - @Transactional(propagation = Propagation.REQUIRED) - public JobInterface findJob(String name) { - return jobDao.findJob(name); - } - 
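[Note for review] To make the mechanical nature of the JobManagerService changes easier to follow, here is one delegating method isolated as a before/after pair, reproduced from the removed lines above and the corresponding added lines that follow. For this method the only differences are the indentation width (4 spaces to 2) and the spacing around '=' in the annotation attribute; the method body is unchanged.

    // Before: previous code_style_ij.xml layout (4-space indent, readOnly=true)
    @Transactional(propagation = Propagation.REQUIRED, readOnly=true)
    public boolean isJobComplete(JobInterface job) {
        return jobDao.isJobComplete(job);
    }

    // After: Google Style layout (2-space indent, readOnly = true)
    @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
    public boolean isJobComplete(JobInterface job) {
      return jobDao.isJobComplete(job);
    }
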
- @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isOverMinCores(JobInterface job) { - return jobDao.isOverMinCores(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DispatchJob getDispatchJob(String id) { - return jobDao.getDispatchJob(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameInterface getFrame(String id) { - return frameDao.getFrame(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameInterface findFrame(LayerInterface layer, int number) { - return frameDao.findFrame(layer, number); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public DispatchFrame getDispatchFrame(String id) { - return frameDao.getDispatchFrame(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public JobDetail findLastJob(String name) { - return jobDao.findLastJob(name); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void setJobPaused(JobInterface job, boolean paused) { - jobDao.updatePaused(job, paused); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void launchJobSpec(JobSpec spec) { - - for (BuildableJob job: spec.getJobs()) { - - JobDetail d = createJob(job); - if (job.maxCoresOverride != null) { - jobDao.updateMaxCores(d, - Convert.coresToWholeCoreUnits(job.maxCoresOverride.intValue())); - } - if (job.maxGpusOverride != null) { - jobDao.updateMaxGpus(d, job.maxGpusOverride.intValue()); - } - if (job.getPostJob() != null) { - BuildableJob postJob = job.getPostJob(); - postJob.env.put("CUE_PARENT_JOB_ID", d.id); - postJob.env.put("CUE_PARENT_JOB", d.name); - createJob(postJob); - jobDao.mapPostJob(job); - } - } + private static final Logger logger = LogManager.getLogger(JobManagerService.class); + + private JobDao jobDao; + private ShowDao showDao; + private FrameDao frameDao; + private LayerDao layerDao; + private LimitDao limitDao; + private HostDao hostDao; + private DependManager dependManager; + private FilterManager filterManager; + private GroupDao groupDao; + private FacilityDao facilityDao; + private JobLogUtil jobLogUtil; + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobComplete(JobInterface job) { + return jobDao.isJobComplete(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isLayerComplete(LayerInterface layer) { + return layerDao.isLayerComplete(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isLayerThreadable(LayerInterface layer) { + return layerDao.isThreadable(layer); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobPending(String name) { + return jobDao.exists(name); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void removeJob(JobInterface job) { + jobDao.deleteJob(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public JobDetail getJobDetail(String id) { + return jobDao.getJobDetail(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public JobInterface getJob(String id) { + return jobDao.getJob(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public JobDetail findJobDetail(String name) { + return jobDao.findJobDetail(name); + } + + @Transactional(propagation = Propagation.REQUIRED) + public 
JobInterface findJob(String name) { + return jobDao.findJob(name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isOverMinCores(JobInterface job) { + return jobDao.isOverMinCores(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchJob getDispatchJob(String id) { + return jobDao.getDispatchJob(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameInterface getFrame(String id) { + return frameDao.getFrame(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameInterface findFrame(LayerInterface layer, int number) { + return frameDao.findFrame(layer, number); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchFrame getDispatchFrame(String id) { + return frameDao.getDispatchFrame(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public JobDetail findLastJob(String name) { + return jobDao.findLastJob(name); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void setJobPaused(JobInterface job, boolean paused) { + jobDao.updatePaused(job, paused); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void launchJobSpec(JobSpec spec) { + + for (BuildableJob job : spec.getJobs()) { + + JobDetail d = createJob(job); + if (job.maxCoresOverride != null) { + jobDao.updateMaxCores(d, Convert.coresToWholeCoreUnits(job.maxCoresOverride.intValue())); + } + if (job.maxGpusOverride != null) { + jobDao.updateMaxGpus(d, job.maxGpusOverride.intValue()); + } + if (job.getPostJob() != null) { + BuildableJob postJob = job.getPostJob(); + postJob.env.put("CUE_PARENT_JOB_ID", d.id); + postJob.env.put("CUE_PARENT_JOB", d.name); + createJob(postJob); + jobDao.mapPostJob(job); + } + } + + for (BuildableDependency dep : spec.getDepends()) { + dep.setLaunchDepend(true); + dependManager.createDepend(dep); + } + + for (BuildableJob job : spec.getJobs()) { + jobDao.activateJob(job.detail, JobState.PENDING); + if (job.getPostJob() != null) { + jobDao.activateJob(job.getPostJob().detail, JobState.POSTED); + } + } + } + + @Transactional(propagation = Propagation.REQUIRED) + public JobDetail createJob(BuildableJob buildableJob) { + + logger.info("creating new job: " + buildableJob.detail.name); + long startTime = System.currentTimeMillis(); + + if (jobDao.exists(buildableJob.detail.name)) { + throw new JobLaunchException( + "error launching job, active job already exists: " + buildableJob.detail.name); + } + + if (buildableJob.getBuildableLayers().size() < 1) { + throw new JobLaunchException("error launching job, there were no layers defined!"); + } + + JobDetail job = buildableJob.detail; + + try { + /* + * Get the last job with the same name and try to use the memory settings for that job. Do + * this before inserting the new job we'll find this job as the last job. + */ + JobDetail lastJob = null; + try { + lastJob = findLastJob(job.name); + logger.info("Last job " + job.name + " was found as " + lastJob.name); + } catch (Exception e) { + logger.info("Last job " + job.name + " was NOT found"); + // don't have another version of the job in the DB. + } + + ShowEntity show = showDao.findShowDetail(job.showName); + if (!job.isPaused) { + job.isPaused = show.paused; + } + + job.showId = show.id; + job.logDir = job.name; + + /* + * The job gets inserted into the root group and unknown department. 
+ */ + GroupDetail rootGroup = groupDao.getRootGroupDetail(job); + job.groupId = rootGroup.id; + job.deptId = rootGroup.deptId; + + resolveFacility(job); + + jobDao.insertJob(job, jobLogUtil); + jobDao.insertEnvironment(job, buildableJob.env); + + for (BuildableLayer buildableLayer : buildableJob.getBuildableLayers()) { + + LayerDetail layer = buildableLayer.layerDetail; + layer.jobId = job.id; + layer.showId = show.id; + + /** Not accurate anymore */ + List frames = CueUtil.normalizeFrameRange(layer.range, layer.chunkSize); + layer.totalFrameCount = frames.size(); - for (BuildableDependency dep: spec.getDepends()) { - dep.setLaunchDepend(true); - dependManager.createDepend(dep); + if (lastJob != null && !buildableLayer.isMemoryOverride) { + long pastMaxRSS = layerDao.findPastMaxRSS(lastJob, layer.name); + if (pastMaxRSS > 0) { + logger.info("found new maxRSS for layer: " + layer.name + " " + pastMaxRSS); + layer.minimumMemory = pastMaxRSS; + } } - for (BuildableJob job: spec.getJobs()) { - jobDao.activateJob(job.detail, JobState.PENDING); - if (job.getPostJob() != null) { - jobDao.activateJob(job.getPostJob().detail, JobState.POSTED); - } + if (layer.minimumCores > 0 && layer.minimumCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { + layer.minimumCores = Dispatcher.CORE_POINTS_RESERVED_MIN; } - } - - @Transactional(propagation = Propagation.REQUIRED) - public JobDetail createJob(BuildableJob buildableJob) { - - logger.info("creating new job: " + buildableJob.detail.name); - long startTime = System.currentTimeMillis(); - - if (jobDao.exists(buildableJob.detail.name)) { - throw new JobLaunchException("error launching job, active job already exists: " + - buildableJob.detail.name); - } - - if (buildableJob.getBuildableLayers().size() < 1) { - throw new JobLaunchException("error launching job, there were no layers defined!"); - } - - JobDetail job = buildableJob.detail; - - try { - /* - * Get the last job with the same name and try to use - * the memory settings for that job. Do this before - * inserting the new job we'll find this job as the last job. - */ - JobDetail lastJob = null; - try { - lastJob = findLastJob(job.name); - logger.info("Last job " + job.name + " was found as " + lastJob.name); - } catch (Exception e) { - logger.info("Last job " + job.name + " was NOT found"); - // don't have another version of the job in the DB. - } - - ShowEntity show = showDao.findShowDetail(job.showName); - if (!job.isPaused) { job.isPaused = show.paused; } - - job.showId = show.id; - job.logDir = job.name; - - /* - * The job gets inserted into the root group and - * unknown department. 
- */ - GroupDetail rootGroup = groupDao.getRootGroupDetail(job); - job.groupId = rootGroup.id; - job.deptId = rootGroup.deptId; - - resolveFacility(job); - - jobDao.insertJob(job, jobLogUtil); - jobDao.insertEnvironment(job, buildableJob.env); - - for (BuildableLayer buildableLayer: buildableJob.getBuildableLayers()) { - - LayerDetail layer = buildableLayer.layerDetail; - layer.jobId = job.id; - layer.showId = show.id; - - /** Not accurate anymore */ - List frames = CueUtil.normalizeFrameRange(layer.range, - layer.chunkSize); - layer.totalFrameCount = frames.size(); - - if (lastJob != null && !buildableLayer.isMemoryOverride) { - long pastMaxRSS = layerDao.findPastMaxRSS(lastJob, layer.name); - if (pastMaxRSS > 0) { - logger.info("found new maxRSS for layer: " + layer.name + " " + pastMaxRSS); - layer.minimumMemory = pastMaxRSS; - } - } - - if (layer.minimumCores > 0 && layer.minimumCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { - layer.minimumCores = Dispatcher.CORE_POINTS_RESERVED_MIN; - } - - logger.info("creating layer " + layer.name + " range: " + layer.range); - layerDao.insertLayerDetail(layer); - layerDao.insertLayerEnvironment(layer, buildableLayer.env); - layer.limits.stream() - .forEach(ln -> addLayerLimit(layer, limitDao.findLimit(ln).getLimitId())); - layer.outputs.stream().forEach(ln -> registerLayerOutput(layer, ln)); - frameDao.insertFrames(layer, frames); - } - - // The priority of a job is set on it's resource entry. - // To update it we set the priority after it's been inserted. - jobDao.updatePriority(job, job.priority); - - /* - * Finally, run any filters on the job which may set the job's - * priority. - */ - filterManager.runFiltersOnJob(job); - - CueUtil.logDuration(startTime, "created job " + job.getName() + " " + job.getId()); - return job; - - } catch (Exception e) { - logger.info("error launching job: " + job.name + "," + e); - throw new JobLaunchException("error launching job: " + job.name + "," + e, e); - } - } - - private void resolveFacility(JobDetail job) { - try { - if (job.facilityName == null) { - job.facilityId = - facilityDao.getDefaultFacility().getId(); - } - else { - job.facilityId = - facilityDao.getFacility(job.facilityName).getId(); - } - } catch (Exception e) { - throw new EntityRetrievalException("failed to find facility: " + job.facilityName, e); - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public boolean shutdownJob(JobInterface job) { - // See JobManagerSupport - if (jobDao.updateJobFinished(job)) { - logger.info("shutting down job: " + job.getName()); - jobDao.activatePostJob(job); - logger.info("activating post jobs"); - return true; - } - return false; - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List findFrames(FrameSearchInterface r) { - return frameDao.findFrames(r); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void updateFrameState(FrameInterface frame, FrameState state) { - frameDao.updateFrameState(frame, state); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public LayerDetail getLayerDetail(String id) { - return layerDao.getLayerDetail(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public LayerInterface getLayer(String id) { - return layerDao.getLayer(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void updateJobState(JobInterface job, JobState state) { - jobDao.updateState(job, state); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - 
public FrameDetail getFrameDetail(String id) { - return frameDao.getFrameDetail(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void markFrameAsWaiting(FrameInterface frame) { - frameDao.markFrameAsWaiting(frame); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void markFrameAsDepend(FrameInterface frame) { - frameDao.markFrameAsDepend(frame); - } - - /** - * Creates a new job log directory. This is only called - * when launching a job. - * - * @param job - */ - @Transactional(propagation = Propagation.NEVER) - public void createJobLogDirectory(JobDetail job) { - if (!jobLogUtil.createJobLogDirectory(job.logDir)) { - throw new JobLaunchException("error launching job, unable to create log directory"); - } - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getLayers(JobInterface job) { - return layerDao.getLayers(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void increaseLayerMemoryRequirement(LayerInterface layer, long memKb) { - layerDao.increaseLayerMinMemory(layer, memKb); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { - switch(order) { - case FIRST: - frameDao.reorderFramesFirst(layer, frameSet); - break; - case LAST: - frameDao.reorderFramesLast(layer, frameSet); - break; - case REVERSE: - frameDao.reorderLayerReverse(layer, frameSet); - break; - } - } - - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void staggerLayer(LayerInterface layer, String range, int stagger) { - frameDao.staggerLayer(layer, range, stagger); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getLayerDetails(JobInterface job) { - return layerDao.getLayerDetails(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getThreadStats(LayerInterface layer) { - return layerDao.getThreadStats(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime) { - layerDao.balanceLayerMinMemory(layer, maxRss); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void enableMemoryOptimizer(LayerInterface layer, boolean state) { - layerDao.enableMemoryOptimizer(layer, state); - } - - @Override - public void appendLayerTag(LayerInterface layer, String tag) { - layerDao.appendLayerTags(layer, tag); - } - - @Override - public void setLayerTag(LayerInterface layer, String tag) { - layerDao.updateLayerTags(layer, Sets.newHashSet(tag)); - } - @Override - public void setLayerMinCores(LayerInterface layer, int coreUnits) { - layerDao.updateLayerMinCores(layer, coreUnits); - } - - @Override - public void setLayerMaxCores(LayerInterface layer, int coreUnits) { - layerDao.updateLayerMaxCores(layer, coreUnits); - } - - @Override - public void setLayerMinGpus(LayerInterface layer, int gpu) { - layerDao.updateLayerMinGpus(layer, gpu); - } - - @Override - public void setLayerMaxGpus(LayerInterface layer, int gpu) { - layerDao.updateLayerMaxGpus(layer, gpu); - } - - @Override - public void addLayerLimit(LayerInterface layer, String limitId) { - layerDao.addLimit(layer, limitId); - } - - @Override - public void dropLayerLimit(LayerInterface layer, String limitId) { - layerDao.dropLimit(layer, limitId); - } - - @Override - public List getLayerLimits(LayerInterface layer) { - return 
layerDao.getLimits(layer); - } - - @Override - public void registerLayerOutput(LayerInterface layer, String filespec) { - try { - layerDao.insertLayerOutput(layer, filespec); - } catch (DataAccessException e) { - // Fail quietly but log it. - logger.warn("Failed to add layer output: " + filespec + "," + e); - } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getLayerOutputs(LayerInterface layer) { - return layerDao.getLayerOutputs(layer); - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void updateCheckpointState(FrameInterface frame, CheckpointState state) { - - if (frameDao.updateFrameCheckpointState(frame, state)) { - logger.info("Checkpoint state of frame " + frame.getId() + - " set to " + state.toString()); - } - else { - logger.warn("Failed to set checkpoint state of " + frame.getId() + - " to " + state.toString()); - } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameDetail findHighestMemoryFrame(JobInterface job) { - return frameDao.findHighestMemoryFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameDetail findLongestFrame(JobInterface job) { - return frameDao.findLongestFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameDetail findLowestMemoryFrame(JobInterface job) { - return frameDao.findLowestMemoryFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameDetail findShortestFrame(JobInterface job) { - return frameDao.findShortestFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ExecutionSummary getExecutionSummary(JobInterface job) { - return jobDao.getExecutionSummary(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameStateTotals getFrameStateTotals(JobInterface job) { - return jobDao.getFrameStateTotals(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ExecutionSummary getExecutionSummary(LayerInterface layer) { - return layerDao.getExecutionSummary(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public FrameStateTotals getFrameStateTotals(LayerInterface layer) { - return layerDao.getFrameStateTotals(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public List getStaleCheckpoints(int cutoffTimeSec) { - return frameDao.getStaleCheckpoints(cutoffTimeSec); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void updateEmail(JobInterface job, String email) { - jobDao.updateEmail(job, email); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public String getEmail(JobInterface job) { - return jobDao.getEmail(job); - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public LimitDao getLimitDao() { - return limitDao; - } - - public void setLimitDao(LimitDao limitDao) { - 
this.limitDao = limitDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao workDao) { - this.jobDao = workDao; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public FacilityDao getFacilityDao() { - return facilityDao; - } - - public void setFacilityDao(FacilityDao facilityDao) { - this.facilityDao = facilityDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public JobLogUtil getJobLogUtil() { - return jobLogUtil; - } - - public void setJobLogUtil(JobLogUtil jobLogUtil) { - this.jobLogUtil = jobLogUtil; - } + logger.info("creating layer " + layer.name + " range: " + layer.range); + layerDao.insertLayerDetail(layer); + layerDao.insertLayerEnvironment(layer, buildableLayer.env); + layer.limits.stream() + .forEach(ln -> addLayerLimit(layer, limitDao.findLimit(ln).getLimitId())); + layer.outputs.stream().forEach(ln -> registerLayerOutput(layer, ln)); + frameDao.insertFrames(layer, frames); + } + + // The priority of a job is set on it's resource entry. + // To update it we set the priority after it's been inserted. + jobDao.updatePriority(job, job.priority); + + /* + * Finally, run any filters on the job which may set the job's priority. + */ + filterManager.runFiltersOnJob(job); + + CueUtil.logDuration(startTime, "created job " + job.getName() + " " + job.getId()); + return job; + + } catch (Exception e) { + logger.info("error launching job: " + job.name + "," + e); + throw new JobLaunchException("error launching job: " + job.name + "," + e, e); + } + } + + private void resolveFacility(JobDetail job) { + try { + if (job.facilityName == null) { + job.facilityId = facilityDao.getDefaultFacility().getId(); + } else { + job.facilityId = facilityDao.getFacility(job.facilityName).getId(); + } + } catch (Exception e) { + throw new EntityRetrievalException("failed to find facility: " + job.facilityName, e); + } + } + + @Transactional(propagation = Propagation.REQUIRED) + public boolean shutdownJob(JobInterface job) { + // See JobManagerSupport + if (jobDao.updateJobFinished(job)) { + logger.info("shutting down job: " + job.getName()); + jobDao.activatePostJob(job); + logger.info("activating post jobs"); + return true; + } + return false; + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findFrames(FrameSearchInterface r) { + return frameDao.findFrames(r); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void updateFrameState(FrameInterface frame, FrameState state) { + frameDao.updateFrameState(frame, state); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LayerDetail getLayerDetail(String id) { + return layerDao.getLayerDetail(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LayerInterface getLayer(String id) { + return layerDao.getLayer(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void updateJobState(JobInterface job, JobState state) { + jobDao.updateState(job, state); + } + + 
@Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameDetail getFrameDetail(String id) {
+ return frameDao.getFrameDetail(id);
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void markFrameAsWaiting(FrameInterface frame) {
+ frameDao.markFrameAsWaiting(frame);
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void markFrameAsDepend(FrameInterface frame) {
+ frameDao.markFrameAsDepend(frame);
+ }
+
+ /**
+ * Creates a new job log directory. This is only called when launching a job.
+ *
+ * @param job
+ */
+ @Transactional(propagation = Propagation.NEVER)
+ public void createJobLogDirectory(JobDetail job) {
+ if (!jobLogUtil.createJobLogDirectory(job.logDir)) {
+ throw new JobLaunchException("error launching job, unable to create log directory");
+ }
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public List<LayerInterface> getLayers(JobInterface job) {
+ return layerDao.getLayers(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void increaseLayerMemoryRequirement(LayerInterface layer, long memKb) {
+ layerDao.increaseLayerMinMemory(layer, memKb);
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) {
+ switch (order) {
+ case FIRST:
+ frameDao.reorderFramesFirst(layer, frameSet);
+ break;
+ case LAST:
+ frameDao.reorderFramesLast(layer, frameSet);
+ break;
+ case REVERSE:
+ frameDao.reorderLayerReverse(layer, frameSet);
+ break;
+ }
+ }
+
+ @Transactional(propagation = Propagation.NOT_SUPPORTED)
+ public void staggerLayer(LayerInterface layer, String range, int stagger) {
+ frameDao.staggerLayer(layer, range, stagger);
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public List<LayerDetail> getLayerDetails(JobInterface job) {
+ return layerDao.getLayerDetails(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public List<ThreadStats> getThreadStats(LayerInterface layer) {
+ return layerDao.getThreadStats(layer);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime) {
+ layerDao.balanceLayerMinMemory(layer, maxRss);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void enableMemoryOptimizer(LayerInterface layer, boolean state) {
+ layerDao.enableMemoryOptimizer(layer, state);
+ }
+
+ @Override
+ public void appendLayerTag(LayerInterface layer, String tag) {
+ layerDao.appendLayerTags(layer, tag);
+ }
+
+ @Override
+ public void setLayerTag(LayerInterface layer, String tag) {
+ layerDao.updateLayerTags(layer, Sets.newHashSet(tag));
+ }
+
+ @Override
+ public void setLayerMinCores(LayerInterface layer, int coreUnits) {
+ layerDao.updateLayerMinCores(layer, coreUnits);
+ }
+
+ @Override
+ public void setLayerMaxCores(LayerInterface layer, int coreUnits) {
+ layerDao.updateLayerMaxCores(layer, coreUnits);
+ }
+
+ @Override
+ public void setLayerMinGpus(LayerInterface layer, int gpu) {
+ layerDao.updateLayerMinGpus(layer, gpu);
+ }
+
+ @Override
+ public void setLayerMaxGpus(LayerInterface layer, int gpu) {
+ layerDao.updateLayerMaxGpus(layer, gpu);
+ }
+
+ @Override
+ public void addLayerLimit(LayerInterface layer, String limitId) {
+ layerDao.addLimit(layer, limitId);
+ }
+
+ @Override
+ public void dropLayerLimit(LayerInterface layer, String limitId) {
+ layerDao.dropLimit(layer, limitId);
+ }
+
+ @Override
+ public List<LimitEntity> getLayerLimits(LayerInterface layer) {
+ return layerDao.getLimits(layer);
+ }
+
+ @Override
+ public void registerLayerOutput(LayerInterface layer, String filespec) {
+ try {
+ layerDao.insertLayerOutput(layer, filespec);
+ } catch (DataAccessException e) {
+ // Fail quietly but log it.
+ logger.warn("Failed to add layer output: " + filespec + "," + e);
+ }
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public List<String> getLayerOutputs(LayerInterface layer) {
+ return layerDao.getLayerOutputs(layer);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.SUPPORTS)
+ public void updateCheckpointState(FrameInterface frame, CheckpointState state) {
+
+ if (frameDao.updateFrameCheckpointState(frame, state)) {
+ logger.info("Checkpoint state of frame " + frame.getId() + " set to " + state.toString());
+ } else {
+ logger.warn("Failed to set checkpoint state of " + frame.getId() + " to " + state.toString());
+ }
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameDetail findHighestMemoryFrame(JobInterface job) {
+ return frameDao.findHighestMemoryFrame(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameDetail findLongestFrame(JobInterface job) {
+ return frameDao.findLongestFrame(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameDetail findLowestMemoryFrame(JobInterface job) {
+ return frameDao.findLowestMemoryFrame(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameDetail findShortestFrame(JobInterface job) {
+ return frameDao.findShortestFrame(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public ExecutionSummary getExecutionSummary(JobInterface job) {
+ return jobDao.getExecutionSummary(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameStateTotals getFrameStateTotals(JobInterface job) {
+ return jobDao.getFrameStateTotals(job);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public ExecutionSummary getExecutionSummary(LayerInterface layer) {
+ return layerDao.getExecutionSummary(layer);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public FrameStateTotals getFrameStateTotals(LayerInterface layer) {
+ return layerDao.getFrameStateTotals(layer);
+ }
+
+ @Override
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public List<FrameInterface> getStaleCheckpoints(int cutoffTimeSec) {
+ return frameDao.getStaleCheckpoints(cutoffTimeSec);
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED)
+ public void updateEmail(JobInterface job, String email) {
+ jobDao.updateEmail(job, email);
+ }
+
+ @Transactional(propagation = Propagation.REQUIRED, readOnly = true)
+ public String getEmail(JobInterface job) {
+ return jobDao.getEmail(job);
+ }
+
+ public DependManager getDependManager() {
+ return dependManager;
+ }
+
+ public void setDependManager(DependManager dependManager) {
+ this.dependManager = dependManager;
+ }
+
+ public FrameDao getFrameDao() {
+ return frameDao;
+ }
+
+ public void setFrameDao(FrameDao frameDao) {
+ this.frameDao = frameDao;
+ }
+
+ public LayerDao getLayerDao() {
+ return layerDao;
+ }
+
+ public void setLayerDao(LayerDao layerDao) {
+ this.layerDao = layerDao;
+ }
+
+ public LimitDao getLimitDao() {
+
return limitDao; + } + + public void setLimitDao(LimitDao limitDao) { + this.limitDao = limitDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao workDao) { + this.jobDao = workDao; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public FacilityDao getFacilityDao() { + return facilityDao; + } + + public void setFacilityDao(FacilityDao facilityDao) { + this.facilityDao = facilityDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public JobLogUtil getJobLogUtil() { + return jobLogUtil; + } + + public void setJobLogUtil(JobLogUtil jobLogUtil) { + this.jobLogUtil = jobLogUtil; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java index e1d285b6d..678dc8de7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.Collection; @@ -58,572 +54,540 @@ * A non-transaction support class for managing jobs. 
*/ public class JobManagerSupport { - private static final Logger logger = LogManager.getLogger(JobManagerSupport.class); - - private JobManager jobManager; - private DependManager dependManager; - private HostManager hostManager; - private RqdClient rqdClient; - private DepartmentManager departmentManager; - private DispatchSupport dispatchSupport; - private DispatchQueue manageQueue; - private RedirectManager redirectManager; - private EmailSupport emailSupport; - private FrameSearchFactory frameSearchFactory; - - public void queueShutdownJob(JobInterface job, Source source, boolean isManualKill) { - manageQueue.execute(new DispatchJobComplete(job, source, isManualKill, this)); - } - - public boolean shutdownJob(JobInterface job, Source source, boolean isManualKill) { - - if (isManualKill && source.getReason().isEmpty()) { - logger.info(job.getName() + "/" + job.getId() + - " **Invalid Job Kill Request** for " + source.toString()); - } - else { - if (jobManager.shutdownJob(job)) { - /* - * Satisfy any dependencies on just the - * job record, not layers or frames. - */ - satisfyWhatDependsOn(job); - - if (departmentManager.isManaged(job)) { - departmentManager.syncJobsWithTask(job); - } - - if (isManualKill) { - logger.info(job.getName() + "/" + job.getId() + - " is being manually killed by " + source.toString()); - - /** - * Sleep a bit here in case any frames were - * dispatched during the job shutdown process. - */ - try { - Thread.sleep(3000); - } catch (InterruptedException e1) { - logger.info(job.getName() + "/" + job.getId() + - " shutdown thread was interrupted."); - Thread.currentThread().interrupt(); - } - - // Report kill requests to sentry - Sentry.configureScope(scope -> { - scope.setExtra("Job Name", job.getName()); - scope.setExtra("Job ID", job.getId()); - scope.setExtra("Job Details", source.toString()); - scope.setExtra("Kill Reason", source.getReason()); - scope.setTag("job", job.getName()); - Sentry.captureMessage("Kill Request Successful"); - }); - - FrameSearchInterface search = frameSearchFactory.create(job); - FrameSearchCriteria newCriteria = search.getCriteria(); - FrameStateSeq states = newCriteria.getStates().toBuilder() - .addFrameStates(FrameState.RUNNING) - .build(); - search.setCriteria(newCriteria.toBuilder().setStates(states).build()); - - for (FrameInterface frame: jobManager.findFrames(search)) { - - VirtualProc proc = null; - try { - proc = hostManager.findVirtualProc(frame); - } - catch (DataAccessException e) { - logger.warn("Unable to find proc to kill frame " + frame + - " on job shutdown operation, " + e); - } - - if (manualStopFrame(frame, FrameState.WAITING)) { - try { - if (proc != null) { - kill(proc, source); - } - } catch (DataAccessException e) { - logger.warn("Failed to kill frame " + frame + - " on job shutdown operation, " + e); - } - catch (Exception e) { - logger.warn("error killing frame: " + frame); - } - } - } - } - - /* - * Send mail after all frames have been stopped or else the email - * will have inaccurate numbers. 
- */ - emailSupport.sendShutdownEmail(job); - - return true; - } - } - - return false; - } - - public void reorderJob(JobInterface job, FrameSet frameSet, Order order) { - List layers = jobManager.getLayers(job); - for (LayerInterface layer: layers) { - jobManager.reorderLayer(layer, frameSet, order); - } - } - - public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { - jobManager.reorderLayer(layer, frameSet, order); - } - - public void staggerJob(JobInterface job, String range, int stagger) { - List layers = jobManager.getLayers(job); - for (LayerInterface layer: layers) { - jobManager.staggerLayer(layer, range, stagger); - } - } - - public void staggerLayer(LayerInterface layer, String range, int stagger) { - jobManager.staggerLayer(layer, range, stagger); - } - - public void satisfyWhatDependsOn(FrameInterface frame) { - List depends = dependManager.getWhatDependsOn(frame); - logger.info("satisfying " + depends.size() + - " depends that are waiting on frame " + frame.getName()); - for (LightweightDependency depend: depends) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(LayerInterface layer) { - List depends = dependManager.getWhatDependsOn(layer); - logger.info("satisfying " + depends.size() + - " depends that are waiting on layer " + layer.getName()); - for (LightweightDependency depend: dependManager.getWhatDependsOn(layer)) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(JobInterface job) { - List depends = dependManager.getWhatDependsOn(job); - logger.info("satisfying " + depends.size() + - " depends that are waiting on job " + job.getName()); - for (LightweightDependency depend: dependManager.getWhatDependsOn(job)) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(JobInterface job, DependTarget target) { - for (LightweightDependency depend: dependManager.getWhatDependsOn(job, target)) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(FrameSearchInterface request) { - for (FrameInterface frame: jobManager.findFrames(request)) { - for (LightweightDependency depend: dependManager.getWhatDependsOn(frame)) { - dependManager.satisfyDepend(depend); - } - } - } - - public boolean isJobComplete(JobInterface job) { - return jobManager.isJobComplete(job); - } - - /* - * Destructive functions require a extra Source argument which contains - * information about the user making the call. This information is - * propagated down to the frame log file. - * - * There are three main destructive functions. - * kill, retry, and eat. - * - * Before a frame is retried or eaten, the new frame state must be - * set and committed to the DB before the call to RQD is made to - * actually kill the frame. This will tell the dispatcher what - * to do with the frame when RQD sends in the FrameCompleteReport. - * - * See RqdReportManagerService.determineFrameState - */ - - - /** - * Kill the specified frame. If RQD throws back an - * exception, the proc is considered lost and is - * manually removed. - * - * @param p - * @param source - */ - public void kill(VirtualProc p, Source source) { - try { - rqdClient.killFrame(p, source.toString()); - } - catch (java.lang.Throwable e) { - dispatchSupport.lostProc(p, "clearing due to failed kill," + - p.getName() + "," + e, Dispatcher.EXIT_STATUS_FAILED_KILL); - } - } - - /** - * Kill a list procs. If RQD throws back an - * exception, the proc is considered lost and is - * manually removed. 
- * - * @param procs - * @param source - */ - public void kill(Collection procs, Source source) { - for (VirtualProc p: procs) { - try { - rqdClient.killFrame(p, source.toString()); - } - catch (java.lang.Throwable e) { - dispatchSupport.lostProc(p, "clearing due to failed kill," + - p.getName() + "," + e, Dispatcher.EXIT_STATUS_FAILED_KILL); - } - } - } - - /** - * Kills a frame. This is a convenience method for when you have - * a reference to the Frame and - * - * @param frame - * @param source - */ - public void kill(FrameInterface frame, Source source) { - kill(hostManager.findVirtualProc(frame), source); - } - - /** - * Unbook and optionally kill all procs that match the specified - * search criteria. - * - * @param r - * @param killProc - * @param source - * @return - */ - public int unbookProcs(ProcSearchInterface r, boolean killProc, Source source) { - List procs = hostManager.findBookedVirtualProcs(r); - for (VirtualProc proc: procs) { - unbookProc(proc, killProc, source); - } - return procs.size(); - } - - /** - * Unbook and optionally kill all procs that match the specified - * search criteria. - * - * @param proc - * @param killProc - * @param source - * @return - */ - public void unbookProc(VirtualProc proc, boolean killProc, Source source) { - hostManager.unbookProc(proc); - if (killProc) { - kill(proc, source); - } - } - - /** - * Kill procs and optionally unbook them as well. - * - * @param host - * @param source - * @param unbook - */ - public void killProcs(HostInterface host, Source source, boolean unbook) { - - List procs = hostManager.findVirtualProcs(host); - - if (unbook) { - hostManager.unbookVirtualProcs(procs); - } - - for (VirtualProc proc: procs) { - kill(proc, source); - } - } - - /** - * Kill procs and optionally unbook them as well. - * - * @param r - * @param source - * @param unbook - */ - public void killProcs(FrameSearchInterface r, Source source, boolean unbook) { - - FrameSearchCriteria newCriteria = - r.getCriteria().toBuilder().setStates(FrameStateSeq.newBuilder().build()).build(); - r.setCriteria(newCriteria); - - List procs = hostManager.findVirtualProcs(r); - - if (unbook) { - hostManager.unbookVirtualProcs(procs); - } - - for (VirtualProc proc: procs) { - kill(proc, source); - } - } - - /** - * Kill procs and optionally unbook them as well. - * - * @param job - * @param source - * @param unbook - */ - public void killProcs(JobInterface job, Source source, boolean unbook) { - List procs = hostManager.findVirtualProcs(frameSearchFactory.create(job)); - if (unbook) { - hostManager.unbookVirtualProcs(procs); - } - - for (VirtualProc proc: procs) { - kill(proc, source); - } - } - - /** - * Retry frames that match the specified FrameSearch request. 
- * - * @param request - * @param source - */ - public void retryFrames(FrameSearchInterface request, Source source) { - for (FrameInterface frame: jobManager.findFrames(request)) { + private static final Logger logger = LogManager.getLogger(JobManagerSupport.class); + + private JobManager jobManager; + private DependManager dependManager; + private HostManager hostManager; + private RqdClient rqdClient; + private DepartmentManager departmentManager; + private DispatchSupport dispatchSupport; + private DispatchQueue manageQueue; + private RedirectManager redirectManager; + private EmailSupport emailSupport; + private FrameSearchFactory frameSearchFactory; + + public void queueShutdownJob(JobInterface job, Source source, boolean isManualKill) { + manageQueue.execute(new DispatchJobComplete(job, source, isManualKill, this)); + } + + public boolean shutdownJob(JobInterface job, Source source, boolean isManualKill) { + + if (isManualKill && source.getReason().isEmpty()) { + logger.info(job.getName() + "/" + job.getId() + " **Invalid Job Kill Request** for " + + source.toString()); + } else { + if (jobManager.shutdownJob(job)) { + /* + * Satisfy any dependencies on just the job record, not layers or frames. + */ + satisfyWhatDependsOn(job); + + if (departmentManager.isManaged(job)) { + departmentManager.syncJobsWithTask(job); + } + + if (isManualKill) { + logger.info(job.getName() + "/" + job.getId() + " is being manually killed by " + + source.toString()); + + /** + * Sleep a bit here in case any frames were dispatched during the job shutdown process. + */ + try { + Thread.sleep(3000); + } catch (InterruptedException e1) { + logger.info(job.getName() + "/" + job.getId() + " shutdown thread was interrupted."); + Thread.currentThread().interrupt(); + } + + // Report kill requests to sentry + Sentry.configureScope(scope -> { + scope.setExtra("Job Name", job.getName()); + scope.setExtra("Job ID", job.getId()); + scope.setExtra("Job Details", source.toString()); + scope.setExtra("Kill Reason", source.getReason()); + scope.setTag("job", job.getName()); + Sentry.captureMessage("Kill Request Successful"); + }); + + FrameSearchInterface search = frameSearchFactory.create(job); + FrameSearchCriteria newCriteria = search.getCriteria(); + FrameStateSeq states = + newCriteria.getStates().toBuilder().addFrameStates(FrameState.RUNNING).build(); + search.setCriteria(newCriteria.toBuilder().setStates(states).build()); + + for (FrameInterface frame : jobManager.findFrames(search)) { + + VirtualProc proc = null; try { - retryFrame(frame, source); - } catch (Exception e) { - CueExceptionUtil.logStackTrace("Failed to retry frame " + frame + - " from source " + source, e); + proc = hostManager.findVirtualProc(frame); + } catch (DataAccessException e) { + logger.warn("Unable to find proc to kill frame " + frame + + " on job shutdown operation, " + e); } - } - } - /** - * Retry a single frame. - * - * @param frame - * @param source - */ - public void retryFrame(FrameInterface frame, Source source) { - /** - * Have to find the proc before we stop the frame. 
- */ - VirtualProc proc = null; - try { - proc = hostManager.findVirtualProc(frame); - } catch (EmptyResultDataAccessException e) { - logger.info("failed to obtain information for " + - "proc running on frame: " + frame); - } - - if (manualStopFrame(frame, FrameState.WAITING)) { - if (proc != null) { - redirectManager.addRedirect(proc, (JobInterface) proc, false, source); - kill(proc, source); + if (manualStopFrame(frame, FrameState.WAITING)) { + try { + if (proc != null) { + kill(proc, source); + } + } catch (DataAccessException e) { + logger.warn("Failed to kill frame " + frame + " on job shutdown operation, " + e); + } catch (Exception e) { + logger.warn("error killing frame: " + frame); + } } - } - else { - jobManager.updateFrameState(frame, FrameState.WAITING); + } } - /** - * If a frame is retried that was part of a dependency, that - * dependency should become active again. + /* + * Send mail after all frames have been stopped or else the email will have inaccurate + * numbers. */ - - // Handle FrameOnFrame depends. - for (LightweightDependency depend: dependManager.getWhatDependsOn( - frame, false)) { - dependManager.unsatisfyDepend(depend); - } - - // Handle LayerOnLayer depends. - for (LightweightDependency depend: dependManager.getWhatDependsOn( - (LayerInterface) frame, false)) { - dependManager.unsatisfyDepend(depend); - } - - // set the job back to pending. - jobManager.updateJobState(jobManager.getJob(frame.getJobId()), JobState.PENDING); - - } - - /** - * Eat frames that match the specified FrameSearch. Eaten - * frames are considered "Succeeded" by the dispatcher. - * A Job with all eaten frames will leave the cue. - * - * @param request - * @param source - */ - public void eatFrames(FrameSearchInterface request, Source source) { - for (FrameInterface frame: jobManager.findFrames(request)) { - eatFrame(frame, source); - } - } - + emailSupport.sendShutdownEmail(job); + + return true; + } + } + + return false; + } + + public void reorderJob(JobInterface job, FrameSet frameSet, Order order) { + List layers = jobManager.getLayers(job); + for (LayerInterface layer : layers) { + jobManager.reorderLayer(layer, frameSet, order); + } + } + + public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { + jobManager.reorderLayer(layer, frameSet, order); + } + + public void staggerJob(JobInterface job, String range, int stagger) { + List layers = jobManager.getLayers(job); + for (LayerInterface layer : layers) { + jobManager.staggerLayer(layer, range, stagger); + } + } + + public void staggerLayer(LayerInterface layer, String range, int stagger) { + jobManager.staggerLayer(layer, range, stagger); + } + + public void satisfyWhatDependsOn(FrameInterface frame) { + List depends = dependManager.getWhatDependsOn(frame); + logger.info( + "satisfying " + depends.size() + " depends that are waiting on frame " + frame.getName()); + for (LightweightDependency depend : depends) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(LayerInterface layer) { + List depends = dependManager.getWhatDependsOn(layer); + logger.info( + "satisfying " + depends.size() + " depends that are waiting on layer " + layer.getName()); + for (LightweightDependency depend : dependManager.getWhatDependsOn(layer)) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(JobInterface job) { + List depends = dependManager.getWhatDependsOn(job); + logger + .info("satisfying " + depends.size() + " depends that are waiting on job " + 
job.getName()); + for (LightweightDependency depend : dependManager.getWhatDependsOn(job)) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(JobInterface job, DependTarget target) { + for (LightweightDependency depend : dependManager.getWhatDependsOn(job, target)) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(FrameSearchInterface request) { + for (FrameInterface frame : jobManager.findFrames(request)) { + for (LightweightDependency depend : dependManager.getWhatDependsOn(frame)) { + dependManager.satisfyDepend(depend); + } + } + } + + public boolean isJobComplete(JobInterface job) { + return jobManager.isJobComplete(job); + } + + /* + * Destructive functions require a extra Source argument which contains information about the user + * making the call. This information is propagated down to the frame log file. + * + * There are three main destructive functions. kill, retry, and eat. + * + * Before a frame is retried or eaten, the new frame state must be set and committed to the DB + * before the call to RQD is made to actually kill the frame. This will tell the dispatcher what + * to do with the frame when RQD sends in the FrameCompleteReport. + * + * See RqdReportManagerService.determineFrameState + */ + + /** + * Kill the specified frame. If RQD throws back an exception, the proc is considered lost and is + * manually removed. + * + * @param p + * @param source + */ + public void kill(VirtualProc p, Source source) { + try { + rqdClient.killFrame(p, source.toString()); + } catch (java.lang.Throwable e) { + dispatchSupport.lostProc(p, "clearing due to failed kill," + p.getName() + "," + e, + Dispatcher.EXIT_STATUS_FAILED_KILL); + } + } + + /** + * Kill a list procs. If RQD throws back an exception, the proc is considered lost and is manually + * removed. + * + * @param procs + * @param source + */ + public void kill(Collection procs, Source source) { + for (VirtualProc p : procs) { + try { + rqdClient.killFrame(p, source.toString()); + } catch (java.lang.Throwable e) { + dispatchSupport.lostProc(p, "clearing due to failed kill," + p.getName() + "," + e, + Dispatcher.EXIT_STATUS_FAILED_KILL); + } + } + } + + /** + * Kills a frame. This is a convenience method for when you have a reference to the Frame and + * + * @param frame + * @param source + */ + public void kill(FrameInterface frame, Source source) { + kill(hostManager.findVirtualProc(frame), source); + } + + /** + * Unbook and optionally kill all procs that match the specified search criteria. + * + * @param r + * @param killProc + * @param source + * @return + */ + public int unbookProcs(ProcSearchInterface r, boolean killProc, Source source) { + List procs = hostManager.findBookedVirtualProcs(r); + for (VirtualProc proc : procs) { + unbookProc(proc, killProc, source); + } + return procs.size(); + } + + /** + * Unbook and optionally kill all procs that match the specified search criteria. + * + * @param proc + * @param killProc + * @param source + * @return + */ + public void unbookProc(VirtualProc proc, boolean killProc, Source source) { + hostManager.unbookProc(proc); + if (killProc) { + kill(proc, source); + } + } + + /** + * Kill procs and optionally unbook them as well. 
+ * + * @param host + * @param source + * @param unbook + */ + public void killProcs(HostInterface host, Source source, boolean unbook) { + + List procs = hostManager.findVirtualProcs(host); + + if (unbook) { + hostManager.unbookVirtualProcs(procs); + } + + for (VirtualProc proc : procs) { + kill(proc, source); + } + } + + /** + * Kill procs and optionally unbook them as well. + * + * @param r + * @param source + * @param unbook + */ + public void killProcs(FrameSearchInterface r, Source source, boolean unbook) { + + FrameSearchCriteria newCriteria = + r.getCriteria().toBuilder().setStates(FrameStateSeq.newBuilder().build()).build(); + r.setCriteria(newCriteria); + + List procs = hostManager.findVirtualProcs(r); + + if (unbook) { + hostManager.unbookVirtualProcs(procs); + } + + for (VirtualProc proc : procs) { + kill(proc, source); + } + } + + /** + * Kill procs and optionally unbook them as well. + * + * @param job + * @param source + * @param unbook + */ + public void killProcs(JobInterface job, Source source, boolean unbook) { + List procs = hostManager.findVirtualProcs(frameSearchFactory.create(job)); + if (unbook) { + hostManager.unbookVirtualProcs(procs); + } + + for (VirtualProc proc : procs) { + kill(proc, source); + } + } + + /** + * Retry frames that match the specified FrameSearch request. + * + * @param request + * @param source + */ + public void retryFrames(FrameSearchInterface request, Source source) { + for (FrameInterface frame : jobManager.findFrames(request)) { + try { + retryFrame(frame, source); + } catch (Exception e) { + CueExceptionUtil.logStackTrace("Failed to retry frame " + frame + " from source " + source, + e); + } + } + } + + /** + * Retry a single frame. + * + * @param frame + * @param source + */ + public void retryFrame(FrameInterface frame, Source source) { /** - * Eat the specified frame. Eaten frames are - * considered "Succeeded" by the dispatcher. A Job - * with all eaten frames will leave the cue. - * - * @param frame - * @param source + * Have to find the proc before we stop the frame. */ - public void eatFrame(FrameInterface frame, Source source) { - /** - * Have to find the proc before we stop the frame. - */ - VirtualProc proc = null; - try { - proc = hostManager.findVirtualProc(frame); - } catch (EmptyResultDataAccessException e) { - logger.info("failed to obtain information " + - "for proc running on frame: " + frame); - } - - if (manualStopFrame(frame, FrameState.EATEN)) { - if (proc != null) { - kill(proc, source); - } - } - else { - jobManager.updateFrameState(frame, FrameState.EATEN); - } - if (jobManager.isJobComplete(frame)) { - queueShutdownJob(frame, source, false); - } + VirtualProc proc = null; + try { + proc = hostManager.findVirtualProc(frame); + } catch (EmptyResultDataAccessException e) { + logger.info("failed to obtain information for " + "proc running on frame: " + frame); } - /** - * Marks the result of the specified frame search as - * FrameState.Waiting and decrease the depend count to 0 - * no matter how many active depends exists. 
- * - * @param request - * @param source - */ - public void markFramesAsWaiting(FrameSearchInterface request, Source source) { - for (FrameInterface frame: jobManager.findFrames(request)) { - jobManager.markFrameAsWaiting(frame); - } + if (manualStopFrame(frame, FrameState.WAITING)) { + if (proc != null) { + redirectManager.addRedirect(proc, (JobInterface) proc, false, source); + kill(proc, source); + } + } else { + jobManager.updateFrameState(frame, FrameState.WAITING); } /** - * Stops the specified frame. Return true if the call to - * this method actually stops the frame, ie the state changes - * from Running to the given state. Return false if the - * frame was already stopped. - * - * Stopping the frame also removes the link between the frame - * and the proc. The proc still exists, but, its assigned - * frame is null. - * - * @param frame - * @param state + * If a frame is retried that was part of a dependency, that dependency should become active + * again. */ - private boolean manualStopFrame(FrameInterface frame, FrameState state) { - if (dispatchSupport.stopFrame(frame, state, - state.ordinal() + 500)) { - dispatchSupport.updateUsageCounters(frame, - state.ordinal() + 500); - logger.info("Manually stopping frame: "+ frame); - return true; - } - return false; - } - - public DependManager getDependManager() { - return dependManager; - } - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; + // Handle FrameOnFrame depends. + for (LightweightDependency depend : dependManager.getWhatDependsOn(frame, false)) { + dependManager.unsatisfyDepend(depend); } - public JobManager getJobManager() { - return jobManager; + // Handle LayerOnLayer depends. + for (LightweightDependency depend : dependManager.getWhatDependsOn((LayerInterface) frame, + false)) { + dependManager.unsatisfyDepend(depend); } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } + // set the job back to pending. + jobManager.updateJobState(jobManager.getJob(frame.getJobId()), JobState.PENDING); - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public HostManager getHostManager() { - return hostManager; - } + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; + /** + * Eat frames that match the specified FrameSearch. Eaten frames are considered "Succeeded" by the + * dispatcher. A Job with all eaten frames will leave the cue. 
+ * + * @param request + * @param source + */ + public void eatFrames(FrameSearchInterface request, Source source) { + for (FrameInterface frame : jobManager.findFrames(request)) { + eatFrame(frame, source); } + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } - - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public EmailSupport getEmailSupport() { - return emailSupport; - } - - public void setEmailSupport(EmailSupport emailSupport) { - this.emailSupport = emailSupport; - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } + /** + * Eat the specified frame. Eaten frames are considered "Succeeded" by the dispatcher. A Job with + * all eaten frames will leave the cue. + * + * @param frame + * @param source + */ + public void eatFrame(FrameInterface frame, Source source) { + /** + * Have to find the proc before we stop the frame. + */ + VirtualProc proc = null; + try { + proc = hostManager.findVirtualProc(frame); + } catch (EmptyResultDataAccessException e) { + logger.info("failed to obtain information " + "for proc running on frame: " + frame); + } + + if (manualStopFrame(frame, FrameState.EATEN)) { + if (proc != null) { + kill(proc, source); + } + } else { + jobManager.updateFrameState(frame, FrameState.EATEN); + } + if (jobManager.isJobComplete(frame)) { + queueShutdownJob(frame, source, false); + } + } + + /** + * Marks the result of the specified frame search as FrameState.Waiting and decrease the depend + * count to 0 no matter how many active depends exists. + * + * @param request + * @param source + */ + public void markFramesAsWaiting(FrameSearchInterface request, Source source) { + for (FrameInterface frame : jobManager.findFrames(request)) { + jobManager.markFrameAsWaiting(frame); + } + } + + /** + * Stops the specified frame. Return true if the call to this method actually stops the frame, ie + * the state changes from Running to the given state. Return false if the frame was already + * stopped. + * + * Stopping the frame also removes the link between the frame and the proc. The proc still exists, + * but, its assigned frame is null. 
+ * + * @param frame + * @param state + */ + private boolean manualStopFrame(FrameInterface frame, FrameState state) { + if (dispatchSupport.stopFrame(frame, state, state.ordinal() + 500)) { + dispatchSupport.updateUsageCounters(frame, state.ordinal() + 500); + logger.info("Manually stopping frame: " + frame); + return true; + } + return false; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public EmailSupport getEmailSupport() { + return emailSupport; + } + + public void setEmailSupport(EmailSupport emailSupport) { + this.emailSupport = emailSupport; + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java index 3ec17e62d..e1236f7e7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.io.File; @@ -60,996 +56,936 @@ import com.imageworks.spcue.util.CueUtil; public class JobSpec { - @Autowired - private Environment env; - - private static final Logger logger = LogManager.getLogger(JobSpec.class); + @Autowired + private Environment env; - private String facility; + private static final Logger logger = LogManager.getLogger(JobSpec.class); - private String show; + private String facility; - private String shot; + private String show; - private String user; + private String shot; - private String email; + private String user; - private Optional uid; + private String email; - private int totalFrames = 0; + private Optional uid; - private Document doc; + private int totalFrames = 0; - private ServiceManager serviceManager; - - /** - * Maximum number of cores a layer can get per frame. - */ - public static final int MAX_CORES = 800; + private Document doc; - /** - * The maximum number of layers a job can have. Increases this with care, - * its usually not worth it. The more layers you have the longer a job takes - * to dispatch which could lead to dispatches being dropped. - */ - public static final int MAX_LAYERS = 1000; + private ServiceManager serviceManager; - /** - * The maximum number of frames a job can have. Increase this with care. The - * more frames a job has, the longer it takes to dispatch, which could lead - * to dispatches being dropped. - */ - public static final int MAX_FRAMES = 100000; + /** + * Maximum number of cores a layer can get per frame. + */ + public static final int MAX_CORES = 800; - // The default number of retries per frame - public static final int FRAME_RETRIES_DEFAULT = 1; + /** + * The maximum number of layers a job can have. Increases this with care, its usually not worth + * it. The more layers you have the longer a job takes to dispatch which could lead to dispatches + * being dropped. + */ + public static final int MAX_LAYERS = 1000; - // The default maximum number of retries per frame. - public static final int FRAME_RETRIES_MAX = 1; + /** + * The maximum number of frames a job can have. Increase this with care. The more frames a job + * has, the longer it takes to dispatch, which could lead to dispatches being dropped. + */ + public static final int MAX_FRAMES = 100000; - // The default minimum number of retries per frame. - public static final int FRAME_RETRIES_MIN = 0; + // The default number of retries per frame + public static final int FRAME_RETRIES_DEFAULT = 1; - public static final String DEFAULT_SERVICE = "default"; + // The default maximum number of retries per frame. + public static final int FRAME_RETRIES_MAX = 1; - public static final String SPCUE_DTD_URL = "http://localhost:8080/spcue/dtd/"; + // The default minimum number of retries per frame. 
+ public static final int FRAME_RETRIES_MIN = 0; - private List jobs = new ArrayList(); + public static final String DEFAULT_SERVICE = "default"; - private List depends = new ArrayList(); + public static final String SPCUE_DTD_URL = "http://localhost:8080/spcue/dtd/"; - public JobSpec() { - } + private List jobs = new ArrayList(); - public static final String NAME_REGEX = "^([\\w\\.-]{3,})$"; + private List depends = new ArrayList(); - public static final Pattern NAME_PATTERN = Pattern.compile(NAME_REGEX); + public JobSpec() {} - public String conformJobName(String name) { + public static final String NAME_REGEX = "^([\\w\\.-]{3,})$"; - if (name == null) { - throw new SpecBuilderException("Job names cannot be null"); - } + public static final Pattern NAME_PATTERN = Pattern.compile(NAME_REGEX); - String prefix = String.format("%s-%s-%s_", show, shot, user); - String suffix = name; + public String conformJobName(String name) { - /* - * Find the job's suffix - */ - if (suffix.startsWith(prefix)) { - int index = prefix.length() - 1; - suffix = suffix.substring(index); - } + if (name == null) { + throw new SpecBuilderException("Job names cannot be null"); + } - suffix = suffix.toLowerCase(); - suffix = suffix.replaceAll("[_]{2,}", "_"); + String prefix = String.format("%s-%s-%s_", show, shot, user); + String suffix = name; - suffix = suffix.replace("-", "_"); + /* + * Find the job's suffix + */ + if (suffix.startsWith(prefix)) { + int index = prefix.length() - 1; + suffix = suffix.substring(index); + } - Matcher matcher = NAME_PATTERN.matcher(suffix); - if (!matcher.matches()) { - throw new SpecBuilderException( - "The job name suffix: " - + suffix - + " must be composed of alpha numeric characters, periods, " - + "and underscores and be at least 3 characters long"); - } + suffix = suffix.toLowerCase(); + suffix = suffix.replaceAll("[_]{2,}", "_"); - suffix = suffix.replaceAll("^[_]{1,}", ""); - prefix = prefix.replaceAll("[_]{1,}$", ""); + suffix = suffix.replace("-", "_"); - return String.format("%s_%s", prefix, suffix).toLowerCase(); + Matcher matcher = NAME_PATTERN.matcher(suffix); + if (!matcher.matches()) { + throw new SpecBuilderException("The job name suffix: " + suffix + + " must be composed of alpha numeric characters, periods, " + + "and underscores and be at least 3 characters long"); } - public static String conformName(String type, String name) { + suffix = suffix.replaceAll("^[_]{1,}", ""); + prefix = prefix.replaceAll("[_]{1,}$", ""); - String lowerType = type.toLowerCase(); + return String.format("%s_%s", prefix, suffix).toLowerCase(); + } - if (name.length() < 3) { - throw new SpecBuilderException( - "The " + lowerType + " name must be at least 3 characters."); - } + public static String conformName(String type, String name) { - String newName = name; - newName = newName.replace("-", "_"); - newName = newName.toLowerCase(); + String lowerType = type.toLowerCase(); - Matcher matcher = NAME_PATTERN.matcher(newName); - if (!matcher.matches()) { - throw new SpecBuilderException("The " + lowerType + " name: " + newName - + " is not in the proper format. 
" + type + " names must be " - + "alpha numeric, no dashes or punctuation."); - } - - return newName; + if (name.length() < 3) { + throw new SpecBuilderException("The " + lowerType + " name must be at least 3 characters."); } - public static String conformShowName(String name) { - return conformName("Show", name); - } + String newName = name; + newName = newName.replace("-", "_"); + newName = newName.toLowerCase(); - public static String conformShotName(String name) { - return conformName("Shot", name); + Matcher matcher = NAME_PATTERN.matcher(newName); + if (!matcher.matches()) { + throw new SpecBuilderException( + "The " + lowerType + " name: " + newName + " is not in the proper format. " + type + + " names must be " + "alpha numeric, no dashes or punctuation."); } - public static String conformLayerName(String name) { - return conformName("Layer", name); - } + return newName; + } - public static final String FRAME_NAME_REGEX = "^([\\d]{4,6})-([\\w]+)$"; + public static String conformShowName(String name) { + return conformName("Show", name); + } - public static final Pattern FRAME_NAME_PATTERN = Pattern - .compile(FRAME_NAME_REGEX); + public static String conformShotName(String name) { + return conformName("Shot", name); + } - public String conformFrameName(String name) { - Matcher m = FRAME_NAME_PATTERN.matcher(name); - if (!m.matches()) { - throw new SpecBuilderException("The frame name: " + name - + " is not in the proper format."); - } - return String.format("%04d-%s", Integer.valueOf(m.group(1)), - conformLayerName(m.group(2))); - } - - /** - * Grabs the show/shot/user/uid for this spec. - */ - private void handleSpecTag() { - Element rootElement = doc.getRootElement(); - facility = rootElement.getChildTextTrim("facility"); - if (facility != null) { - facility = facility.toLowerCase(); - } + public static String conformLayerName(String name) { + return conformName("Layer", name); + } - show = rootElement.getChildTextTrim("show"); - shot = conformShotName(rootElement.getChildTextTrim("shot")); - user = rootElement.getChildTextTrim("user"); - uid = Optional.ofNullable(rootElement.getChildTextTrim("uid")).map(Integer::parseInt); - email = rootElement.getChildTextTrim("email"); + public static final String FRAME_NAME_REGEX = "^([\\d]{4,6})-([\\w]+)$"; - if (user.equals("root") || uid.equals(Optional.of(0))) { - throw new SpecBuilderException("Cannot launch jobs as root."); - } - } - - /** - * Loop over all tags - * - */ - private void handleJobsTag() { - List elements = doc.getRootElement().getChildren("job"); - if (elements == null) { - return; - } + public static final Pattern FRAME_NAME_PATTERN = Pattern.compile(FRAME_NAME_REGEX); - for (Object tmpElement : elements) { - Element jobElement = (Element) tmpElement; - jobs.add(handleJobTag(jobElement)); - } + public String conformFrameName(String name) { + Matcher m = FRAME_NAME_PATTERN.matcher(name); + if (!m.matches()) { + throw new SpecBuilderException("The frame name: " + name + " is not in the proper format."); } + return String.format("%04d-%s", Integer.valueOf(m.group(1)), conformLayerName(m.group(2))); + } - /** - * Loop over all tags - * - */ - private void handleDependsTags() { - Element delements = doc.getRootElement().getChild("depends"); - if (delements == null) { - return; - } - List elements = delements.getChildren("depend"); - if (elements == null) { - return; - } - for (Object tmpElement : elements) { - Element dependElement = (Element) tmpElement; - depends.add(handleDependTag(dependElement)); - } + /** + * Grabs the 
show/shot/user/uid for this spec. + */ + private void handleSpecTag() { + Element rootElement = doc.getRootElement(); + facility = rootElement.getChildTextTrim("facility"); + if (facility != null) { + facility = facility.toLowerCase(); } - /** - * - * @param jobTag - * @return - */ - private BuildableJob handleJobTag(Element jobTag) { - - /* - * Read in the job tag - */ - JobDetail job = new JobDetail(); - job.name = conformJobName(jobTag.getAttributeValue("name")); - job.state = JobState.STARTUP; - job.isPaused = Convert.stringToBool(jobTag.getChildTextTrim("paused")); - job.isAutoEat = Convert.stringToBool(jobTag.getChildTextTrim("autoeat")); - job.isLocal = false; - Element local = jobTag.getChild("localbook"); - if (local != null) { - job.isLocal = true; - job.localHostName = local.getAttributeValue("host"); - if (local.getAttributeValue("cores") != null) - job.localMaxCores = Integer.parseInt(local.getAttributeValue("cores")); - if (local.getAttributeValue("memory") != null) - job.localMaxMemory = Long.parseLong(local.getAttributeValue("memory")); - if (local.getAttributeValue("threads") != null) - job.localThreadNumber = Integer.parseInt(local.getAttributeValue("threads")); - if (local.getAttributeValue("gpus") != null) - job.localMaxGpus = Integer.parseInt(local.getAttributeValue("gpus")); - if (local.getAttributeValue("gpu") != null) { - logger.warn(job.name + " localbook has the deprecated gpu. Use gpu_memory."); - job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu")); - } - if (local.getAttributeValue("gpu_memory") != null) - job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu_memory")); - } + show = rootElement.getChildTextTrim("show"); + shot = conformShotName(rootElement.getChildTextTrim("shot")); + user = rootElement.getChildTextTrim("user"); + uid = Optional.ofNullable(rootElement.getChildTextTrim("uid")).map(Integer::parseInt); + email = rootElement.getChildTextTrim("email"); - job.maxCoreUnits = 20000; - job.minCoreUnits = 100; - job.startTime = CueUtil.getTime(); - job.maxRetries = FRAME_RETRIES_DEFAULT; - job.shot = shot; - job.user = user; - job.uid = uid; - job.email = email; - job.os = null; // default to no OS specified - job.showName = show; - job.facilityName = facility; - job.deptName = jobTag.getChildTextTrim("dept"); - - BuildableJob buildableJob = new BuildableJob(job); - - if (jobTag.getChildTextTrim("os") != null) { - job.os = jobTag.getChildTextTrim("os"); - } + if (user.equals("root") || uid.equals(Optional.of(0))) { + throw new SpecBuilderException("Cannot launch jobs as root."); + } + } - if (jobTag.getChildTextTrim("maxretries") != null) { - job.maxRetries = Integer.valueOf(jobTag - .getChildTextTrim("maxretries")); - if (job.maxRetries > FRAME_RETRIES_MAX) { - job.maxRetries = FRAME_RETRIES_MAX; - } else if (job.maxRetries < FRAME_RETRIES_MIN) { - job.maxRetries = FRAME_RETRIES_MIN; - } - } + /** + * Loop over all tags + * + */ + private void handleJobsTag() { + List elements = doc.getRootElement().getChildren("job"); + if (elements == null) { + return; + } - if (jobTag.getChildTextTrim("maxcores") != null) { - buildableJob.maxCoresOverride = Integer.valueOf(jobTag - .getChildTextTrim("maxcores")); - } - if (jobTag.getChildTextTrim("maxgpus") != null) { - buildableJob.maxGpusOverride = Integer.valueOf(jobTag - .getChildTextTrim("maxgpus")); - } + for (Object tmpElement : elements) { + Element jobElement = (Element) tmpElement; + jobs.add(handleJobTag(jobElement)); + } + } - if (jobTag.getChildTextTrim("priority") != 
null) { - job.priority = Integer.valueOf(jobTag.getChildTextTrim("priority")); - } + /** + * Loop over all tags + * + */ + private void handleDependsTags() { + Element delements = doc.getRootElement().getChild("depends"); + if (delements == null) { + return; + } + List elements = delements.getChildren("depend"); + if (elements == null) { + return; + } + for (Object tmpElement : elements) { + Element dependElement = (Element) tmpElement; + depends.add(handleDependTag(dependElement)); + } + } + /** + * + * @param jobTag + * @return + */ + private BuildableJob handleJobTag(Element jobTag) { - Element envTag = jobTag.getChild("env"); - if (envTag != null) { - handleEnvironmentTag(envTag, buildableJob.env); - } + /* + * Read in the job tag + */ + JobDetail job = new JobDetail(); + job.name = conformJobName(jobTag.getAttributeValue("name")); + job.state = JobState.STARTUP; + job.isPaused = Convert.stringToBool(jobTag.getChildTextTrim("paused")); + job.isAutoEat = Convert.stringToBool(jobTag.getChildTextTrim("autoeat")); + job.isLocal = false; + Element local = jobTag.getChild("localbook"); + if (local != null) { + job.isLocal = true; + job.localHostName = local.getAttributeValue("host"); + if (local.getAttributeValue("cores") != null) + job.localMaxCores = Integer.parseInt(local.getAttributeValue("cores")); + if (local.getAttributeValue("memory") != null) + job.localMaxMemory = Long.parseLong(local.getAttributeValue("memory")); + if (local.getAttributeValue("threads") != null) + job.localThreadNumber = Integer.parseInt(local.getAttributeValue("threads")); + if (local.getAttributeValue("gpus") != null) + job.localMaxGpus = Integer.parseInt(local.getAttributeValue("gpus")); + if (local.getAttributeValue("gpu") != null) { + logger.warn(job.name + " localbook has the deprecated gpu. 
Use gpu_memory."); + job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu")); + } + if (local.getAttributeValue("gpu_memory") != null) + job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu_memory")); + } + + job.maxCoreUnits = 20000; + job.minCoreUnits = 100; + job.startTime = CueUtil.getTime(); + job.maxRetries = FRAME_RETRIES_DEFAULT; + job.shot = shot; + job.user = user; + job.uid = uid; + job.email = email; + job.os = null; // default to no OS specified + job.showName = show; + job.facilityName = facility; + job.deptName = jobTag.getChildTextTrim("dept"); + + BuildableJob buildableJob = new BuildableJob(job); + + if (jobTag.getChildTextTrim("os") != null) { + job.os = jobTag.getChildTextTrim("os"); + } + + if (jobTag.getChildTextTrim("maxretries") != null) { + job.maxRetries = Integer.valueOf(jobTag.getChildTextTrim("maxretries")); + if (job.maxRetries > FRAME_RETRIES_MAX) { + job.maxRetries = FRAME_RETRIES_MAX; + } else if (job.maxRetries < FRAME_RETRIES_MIN) { + job.maxRetries = FRAME_RETRIES_MIN; + } + } + + if (jobTag.getChildTextTrim("maxcores") != null) { + buildableJob.maxCoresOverride = Integer.valueOf(jobTag.getChildTextTrim("maxcores")); + } + if (jobTag.getChildTextTrim("maxgpus") != null) { + buildableJob.maxGpusOverride = Integer.valueOf(jobTag.getChildTextTrim("maxgpus")); + } + + if (jobTag.getChildTextTrim("priority") != null) { + job.priority = Integer.valueOf(jobTag.getChildTextTrim("priority")); + } + + Element envTag = jobTag.getChild("env"); + if (envTag != null) { + handleEnvironmentTag(envTag, buildableJob.env); + } + + handleLayerTags(buildableJob, jobTag); + + if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { + throw new SpecBuilderException("The job " + job.name + " has over " + MAX_LAYERS + " layers"); + } + + if (buildableJob.getBuildableLayers().size() < 1) { + throw new SpecBuilderException("The job " + job.name + " has no layers"); + } + + return buildableJob; + } + + /** + * + * @param buildableJob + * @param jobTag + */ + private void handleLayerTags(BuildableJob buildableJob, Element jobTag) { + + Set layerNames = new HashSet(); + int dispatchOrder = 0; + + for (Object layerTmp : jobTag.getChild("layers").getChildren("layer")) { + + Element layerTag = (Element) layerTmp; + + /* + * Setup a LayerDetail and Buildable layer, add layer to job + */ + LayerDetail layer = new LayerDetail(); + BuildableLayer buildableLayer = new BuildableLayer(layer); + + /* + * Setup the layer type + */ + String layerType = layerTag.getAttributeValue("type"); + /* + * The Enum is capitalized so make sure that we capitalize the string we received from the + * user. + */ + layer.type = LayerType.valueOf(layerType.toUpperCase()); + if (layer.type == null) { + throw new SpecBuilderException("error, the layer " + layer.name + + " was defined with an invalid type: " + layerTag.getAttributeValue("type")); + } + + /* + * If the layer is a post layer, we add it to the post job. + */ + if (layer.type.equals(LayerType.POST)) { + if (buildableJob.getPostJob() == null) { + buildableJob.setPostJob(initPostJob(buildableJob)); + } + buildableJob.getPostJob().addBuildableLayer(buildableLayer); + } else { + buildableJob.addBuildableLayer(buildableLayer); + } + + /* + * Check to make sure the name is unique for this job. 
+ */ + if (layerTag.getAttributeValue("name") == null) { + throw new SpecBuilderException("error, the layer name cannot be null"); + } + + layer.name = conformLayerName(layerTag.getAttributeValue("name")); + + if (layerNames.contains(layer.name)) { + throw new SpecBuilderException("error, the layer " + layer.name + + " was already defined in job " + buildableJob.detail.name); + } + layerNames.add(layer.name); + + /* + * Setup the simple layer properties. + */ + layer.command = layerTag.getChildTextTrim("cmd"); + layer.range = layerTag.getChildTextTrim("range"); + layer.dispatchOrder = ++dispatchOrder; + + /* + * Determine some of the more complex attributes. + */ + determineResourceDefaults(layerTag, buildableJob, layer); + determineChunkSize(layerTag, layer); + determineMinimumCores(layerTag, layer); + determineMinimumGpus(layerTag, layer); + determineThreadable(layerTag, layer); + determineTags(buildableJob, layer, layerTag); + determineMinimumMemory(buildableJob, layerTag, layer, buildableLayer); + determineMinimumGpuMemory(buildableJob, layerTag, layer); + determineOutputs(layerTag, buildableJob, layer); + + // set a timeout value on the layer + if (layerTag.getChildTextTrim("timeout") != null) { + layer.timeout = Integer.parseInt(layerTag.getChildTextTrim("timeout")); + } + + if (layerTag.getChildTextTrim("timeout_llu") != null) { + layer.timeout_llu = Integer.parseInt(layerTag.getChildTextTrim("timeout_llu")); + } + + /* + * Handle the layer environment + */ + Element envTag = layerTag.getChild("env"); + if (envTag != null) { + handleEnvironmentTag(envTag, buildableLayer.env); + } + + totalFrames = totalFrames + getFrameRangeSize(layer.range, layer.chunkSize); + + if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { + throw new SpecBuilderException( + "error, your job has " + buildableJob.getBuildableLayers().size() + " layers, " + + " the maximum number of allowed layers is " + MAX_LAYERS); + } + + if (totalFrames > MAX_FRAMES) { + throw new SpecBuilderException("error, your job has " + totalFrames + + " frames, the maximum number of allowed " + "frames is " + MAX_FRAMES); + } + } + } + + /** + * Convert string given for memory, with m for megabytes or g for gigabytes to kilobytes. + * + * @param input + */ + private long convertMemoryInput(String input) { + if (input.contains("m")) { + double megs = Double.valueOf(input.substring(0, input.lastIndexOf("m"))); + return (long) (megs * 1024); + } else if (input.contains("g")) { + return Long.valueOf(input.substring(0, input.lastIndexOf("g"))) * CueUtil.GB; + } else { + return Long.valueOf(input) * CueUtil.GB; + } + } + + private void determineMinimumMemory(BuildableJob buildableJob, Element layerTag, + LayerDetail layer, BuildableLayer buildableLayer) { + + if (layerTag.getChildTextTrim("memory") == null) { + return; + } + + long minMemory; + String memory = layerTag.getChildTextTrim("memory").toLowerCase(); + + minMemory = convertMemoryInput(memory); + long memReservedMin = env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memReservedMax = env.getRequiredProperty("dispatcher.memory.mem_reserved_max", Long.class); + + // Some quick sanity checks to make sure memory hasn't gone + // over or under reasonable defaults. 
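For reference, the memory handling above converts suffixed strings to kilobytes: an "m" suffix means megabytes, a "g" suffix (or a bare number) means gigabytes. A minimal self-contained sketch of the same rules, with a hypothetical class name and assuming CueUtil.GB is the number of kilobytes in a gigabyte (1048576):

    // Illustrative stand-in for convertMemoryInput above; constant mirrors the assumed CueUtil.GB.
    public final class MemorySpecSketch {
        private static final long KB_PER_GB = 1048576L; // assumption: CueUtil.GB expressed in KB

        static long toKilobytes(String input) {
            if (input.contains("m")) {
                // "2048m" -> 2048 MB * 1024 = 2097152 KB
                double megs = Double.parseDouble(input.substring(0, input.lastIndexOf("m")));
                return (long) (megs * 1024);
            } else if (input.contains("g")) {
                // "4g" -> 4 GB * KB_PER_GB
                return Long.parseLong(input.substring(0, input.lastIndexOf("g"))) * KB_PER_GB;
            }
            // Bare numbers are treated as gigabytes.
            return Long.parseLong(input) * KB_PER_GB;
        }

        public static void main(String[] args) {
            System.out.println(toKilobytes("2048m")); // 2097152
            System.out.println(toKilobytes("4g"));    // 4194304
        }
    }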
+ if (minMemory > memReservedMax) { + logger.warn("Setting memory for " + buildableJob.detail.name + "/" + layer.name + " to: " + + memReservedMax); + layer.minimumMemory = memReservedMax; + } else if (minMemory < memReservedMin) { + logger.warn(buildableJob.detail.name + "/" + layer.name + + "Specified too little memory, defaulting to: " + memReservedMin); + minMemory = memReservedMin; + } + + buildableLayer.isMemoryOverride = true; + layer.minimumMemory = minMemory; + + } + + /** + * If the gpu_memory option is set, set minimumGpuMemory to that supplied value + * + * @param layerTag + * @param layer + */ + private void determineMinimumGpuMemory(BuildableJob buildableJob, Element layerTag, + LayerDetail layer) { + + String gpu = layerTag.getChildTextTrim("gpu"); + String gpuMemory = layerTag.getChildTextTrim("gpu_memory"); + if (gpu == null && gpuMemory == null) { + return; + } + + String memory = null; + if (gpu != null) { + logger.warn( + buildableJob.detail.name + "/" + layer.name + " has the deprecated gpu. Use gpu_memory."); + memory = gpu.toLowerCase(); + } + if (gpuMemory != null) + memory = gpuMemory.toLowerCase(); + + long minGpuMemory; + try { + minGpuMemory = convertMemoryInput(memory); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + long memGpuReservedMax = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_max", Long.class); + + // Some quick sanity checks to make sure gpu memory hasn't gone + // over or under reasonable defaults. + if (minGpuMemory > memGpuReservedMax) { + throw new SpecBuilderException( + "Gpu memory requirements exceed " + "maximum. Are you specifying the correct units?"); + } else if (minGpuMemory < memGpuReservedMin) { + logger.warn(buildableJob.detail.name + "/" + layer.name + + "Specified too little gpu memory, defaulting to: " + memGpuReservedMin); + minGpuMemory = memGpuReservedMin; + } + + layer.minimumGpuMemory = minGpuMemory; + + } catch (Exception e) { + logger.info("Error setting gpu memory for " + buildableJob.detail.name + "/" + layer.name + + " failed, reason: " + e + ". Using default."); + layer.minimumGpuMemory = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + } + } + + /** + * Cores may be specified as a decimal or core points. + * + * If no core value is specified, we default to the value of + * Dispatcher.CORE_POINTS_RESERVED_DEFAULT + * + * If the value is specified but is less than the minimum allowed, then the value is reset to the + * default. + * + * If the value is specified but is greater than the max allowed, then the value is reset to the + * default. + * + */ + private void determineMinimumCores(Element layerTag, LayerDetail layer) { + + String cores = layerTag.getChildTextTrim("cores"); + if (cores == null) { + return; + } + + int corePoints = layer.minimumCores; + + if (cores.contains(".")) { + if (cores.contains("-")) { + corePoints = (int) (Double.valueOf(cores) * 100 - .5); + } else { + corePoints = (int) (Double.valueOf(cores) * 100 + .5); + } + } else { + corePoints = Integer.valueOf(cores); + } + + if (corePoints > 0 && corePoints < Dispatcher.CORE_POINTS_RESERVED_MIN) { + corePoints = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + } else if (corePoints > Dispatcher.CORE_POINTS_RESERVED_MAX) { + corePoints = Dispatcher.CORE_POINTS_RESERVED_MAX; + } + + layer.minimumCores = corePoints; + } + + /** + * Gpu is a int. 
+ * + * If no gpu value is specified, we default to the value of Dispatcher.GPU_RESERVED_DEFAULT + */ + private void determineMinimumGpus(Element layerTag, LayerDetail layer) { + + String gpus = layerTag.getChildTextTrim("gpus"); + if (gpus != null) { + layer.minimumGpus = Integer.valueOf(gpus); + } + } + + private void determineChunkSize(Element layerTag, LayerDetail layer) { + layer.chunkSize = Integer.parseInt(layerTag.getChildTextTrim("chunk")); + } + + /** + * Determine if the layer is threadable. A manually set threadable option in the job spec should + * override the service defaults. + * + * @param layerTag + * @param layer + */ + private void determineThreadable(Element layerTag, LayerDetail layer) { + // Must have at least 1 core to thread. + if (layer.minimumCores > 0 && layer.minimumCores < 100) { + layer.isThreadable = false; + } else if (layerTag.getChildTextTrim("threadable") != null) { + layer.isThreadable = Convert.stringToBool(layerTag.getChildTextTrim("threadable")); + } + } + + private void determineResourceDefaults(Element layerTag, BuildableJob job, LayerDetail layer) { + + Element t_services = layerTag.getChild("services"); + List services = new ArrayList(); + + /* + * Build a list of services from the XML. Filter out duplicates and empty services. + */ + if (t_services != null) { - handleLayerTags(buildableJob, jobTag); + for (Object tmp : t_services.getChildren()) { + Element t_service = (Element) tmp; + String service_name = t_service.getTextTrim(); - if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { - throw new SpecBuilderException("The job " + job.name + " has over " - + MAX_LAYERS + " layers"); + if (service_name.length() == 0) { + continue; } - if (buildableJob.getBuildableLayers().size() < 1) { - throw new SpecBuilderException("The job " + job.name - + " has no layers"); + if (services.contains(service_name)) { + continue; } - - return buildableJob; + services.add(service_name); + } } - /** - * - * @param buildableJob - * @param jobTag + /* + * Start from the beginning and check each service. The first one that has a service record will + * be the one to use. */ - private void handleLayerTags(BuildableJob buildableJob, Element jobTag) { - - Set layerNames = new HashSet(); - int dispatchOrder = 0; - - for (Object layerTmp : jobTag.getChild("layers").getChildren("layer")) { - - Element layerTag = (Element) layerTmp; - - /* - * Setup a LayerDetail and Buildable layer, add layer to job - */ - LayerDetail layer = new LayerDetail(); - BuildableLayer buildableLayer = new BuildableLayer(layer); - - /* - * Setup the layer type - */ - String layerType = layerTag.getAttributeValue("type"); - /* - * The Enum is capitalized so make sure that we capitalize the - * string we received from the user. - */ - layer.type = LayerType.valueOf(layerType.toUpperCase()); - if (layer.type == null) { - throw new SpecBuilderException("error, the layer " + layer.name - + " was defined with an invalid type: " - + layerTag.getAttributeValue("type")); - } - - /* - * If the layer is a post layer, we add it to the post job. - */ - if (layer.type.equals(LayerType.POST)) { - if (buildableJob.getPostJob() == null) { - buildableJob.setPostJob(initPostJob(buildableJob)); - } - buildableJob.getPostJob().addBuildableLayer(buildableLayer); - } else { - buildableJob.addBuildableLayer(buildableLayer); - } - - /* - * Check to make sure the name is unique for this job. 
- */ - if (layerTag.getAttributeValue("name") == null) { - throw new SpecBuilderException( - "error, the layer name cannot be null"); - } - - layer.name = conformLayerName(layerTag.getAttributeValue("name")); - - if (layerNames.contains(layer.name)) { - throw new SpecBuilderException("error, the layer " + layer.name - + " was already defined in job " - + buildableJob.detail.name); - } - layerNames.add(layer.name); - - /* - * Setup the simple layer properties. - */ - layer.command = layerTag.getChildTextTrim("cmd"); - layer.range = layerTag.getChildTextTrim("range"); - layer.dispatchOrder = ++dispatchOrder; - - /* - * Determine some of the more complex attributes. - */ - determineResourceDefaults(layerTag, buildableJob, layer); - determineChunkSize(layerTag, layer); - determineMinimumCores(layerTag, layer); - determineMinimumGpus(layerTag, layer); - determineThreadable(layerTag, layer); - determineTags(buildableJob, layer, layerTag); - determineMinimumMemory(buildableJob, layerTag, layer, - buildableLayer); - determineMinimumGpuMemory(buildableJob, layerTag, layer); - determineOutputs(layerTag, buildableJob, layer); - - // set a timeout value on the layer - if (layerTag.getChildTextTrim("timeout") != null) { - layer.timeout = Integer.parseInt(layerTag.getChildTextTrim("timeout")); - } - - if (layerTag.getChildTextTrim("timeout_llu") != null) { - layer.timeout_llu = Integer.parseInt(layerTag.getChildTextTrim("timeout_llu")); - } - - /* - * Handle the layer environment - */ - Element envTag = layerTag.getChild("env"); - if (envTag != null) { - handleEnvironmentTag(envTag, buildableLayer.env); - } - - totalFrames = totalFrames - + getFrameRangeSize(layer.range, layer.chunkSize); - - if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { - throw new SpecBuilderException("error, your job has " - + buildableJob.getBuildableLayers().size() - + " layers, " - + " the maximum number of allowed layers is " - + MAX_LAYERS); - } - - if (totalFrames > MAX_FRAMES) { - throw new SpecBuilderException("error, your job has " - + totalFrames - + " frames, the maximum number of allowed " - + "frames is " + MAX_FRAMES); - } - } - } - - /** - * Convert string given for memory, with m for megabytes or g for gigabytes - * to kilobytes. - * - * @param input + ServiceEntity primaryService = null; + for (String service_name : services) { + try { + primaryService = serviceManager.getService(service_name, job.detail.showName); + // Once a service is found, break; + break; + } catch (EmptyResultDataAccessException e) { + logger.warn("warning, service not found for layer " + layer.getName() + " " + service_name); + } + } + + /* + * If no primary service was found, use the default service. 
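For orientation, the service resolution above is first-match-wins: each service name listed for the layer is looked up in order, the first one with a record becomes the primary service, and if none resolve, the default service is used and appended to the list. A hedged sketch of that ordering with hypothetical types; the real code calls serviceManager.getService(name, show) and treats EmptyResultDataAccessException as "not found":

    import java.util.List;
    import java.util.Optional;
    import java.util.function.Function;

    final class PrimaryServiceSketch {
        static String resolvePrimary(List<String> services,
                Function<String, Optional<String>> lookup, String defaultService) {
            for (String name : services) {
                Optional<String> found = lookup.apply(name);
                if (found.isPresent()) {
                    return found.get(); // first service with a record wins
                }
            }
            services.add(defaultService); // nothing resolved: fall back and record the default
            return defaultService;
        }

        public static void main(String[] args) {
            List<String> services = new java.util.ArrayList<>(List.of("unknown", "shell"));
            String primary = resolvePrimary(services,
                    name -> name.equals("shell") ? Optional.of("shell") : Optional.<String>empty(),
                    "default");
            System.out.println(primary); // shell
        }
    }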
*/ - private long convertMemoryInput(String input) { - if (input.contains("m")) { - double megs = Double.valueOf(input.substring(0, input.lastIndexOf("m"))); - return (long) (megs * 1024); - } else if (input.contains("g")) { - return Long.valueOf(input.substring(0, input.lastIndexOf("g"))) * CueUtil.GB; - } else { - return Long.valueOf(input) * CueUtil.GB; - } + if (primaryService == null) { + primaryService = serviceManager.getService(DEFAULT_SERVICE); + services.add(primaryService.name); } - private void determineMinimumMemory(BuildableJob buildableJob, - Element layerTag, LayerDetail layer, BuildableLayer buildableLayer) { + Element t_limits = layerTag.getChild("limits"); + List limits = new ArrayList(); - if (layerTag.getChildTextTrim("memory") == null) { - return; - } + if (t_limits != null) { + for (Object tmp : t_limits.getChildren()) { + Element t_limit = (Element) tmp; + String limitName = t_limit.getTextTrim(); - long minMemory; - String memory = layerTag.getChildTextTrim("memory").toLowerCase(); - - minMemory = convertMemoryInput(memory); - long memReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_min", - Long.class); - long memReservedMax = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_max", - Long.class); - - // Some quick sanity checks to make sure memory hasn't gone - // over or under reasonable defaults. - if (minMemory > memReservedMax) { - logger.warn("Setting memory for " + buildableJob.detail.name + - "/" + layer.name + " to: "+ memReservedMax); - layer.minimumMemory = memReservedMax; + if (limitName.length() == 0) { + continue; } - else if (minMemory < memReservedMin) { - logger.warn(buildableJob.detail.name + "/" + layer.name + - "Specified too little memory, defaulting to: " + - memReservedMin); - minMemory = memReservedMin; - } - - buildableLayer.isMemoryOverride = true; - layer.minimumMemory = minMemory; - - } - - /** - * If the gpu_memory option is set, set minimumGpuMemory to that supplied value - * - * @param layerTag - * @param layer - */ - private void determineMinimumGpuMemory(BuildableJob buildableJob, Element layerTag, - LayerDetail layer) { - - String gpu = layerTag.getChildTextTrim("gpu"); - String gpuMemory = layerTag.getChildTextTrim("gpu_memory"); - if (gpu == null && gpuMemory == null) { - return; - } - - String memory = null; - if (gpu != null) { - logger.warn(buildableJob.detail.name + "/" + layer.name + - " has the deprecated gpu. Use gpu_memory."); - memory = gpu.toLowerCase(); - } - if (gpuMemory != null) - memory = gpuMemory.toLowerCase(); - - long minGpuMemory; - try { - minGpuMemory = convertMemoryInput(memory); - long memGpuReservedMin = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_min", - Long.class); - long memGpuReservedMax = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_max", - Long.class); - - // Some quick sanity checks to make sure gpu memory hasn't gone - // over or under reasonable defaults. - if (minGpuMemory > memGpuReservedMax) { - throw new SpecBuilderException("Gpu memory requirements exceed " + - "maximum. 
Are you specifying the correct units?"); - } - else if (minGpuMemory < memGpuReservedMin) { - logger.warn(buildableJob.detail.name + "/" + layer.name + - "Specified too little gpu memory, defaulting to: " + - memGpuReservedMin); - minGpuMemory = memGpuReservedMin; - } - - layer.minimumGpuMemory = minGpuMemory; - } catch (Exception e) { - logger.info("Error setting gpu memory for " + - buildableJob.detail.name + "/" + layer.name + - " failed, reason: " + e + ". Using default."); - layer.minimumGpuMemory = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_min", - Long.class); + if (limits.contains(limitName)) { + continue; } + limits.add(limitName); + } } - /** - * Cores may be specified as a decimal or core points. - * - * If no core value is specified, we default to the value of - * Dispatcher.CORE_POINTS_RESERVED_DEFAULT - * - * If the value is specified but is less than the minimum allowed, then the - * value is reset to the default. - * - * If the value is specified but is greater than the max allowed, then the - * value is reset to the default. - * - */ - private void determineMinimumCores(Element layerTag, LayerDetail layer) { + logger.info("primary service: " + primaryService.getName() + " " + layer.getName()); - String cores = layerTag.getChildTextTrim("cores"); - if (cores == null) { - return; - } - - int corePoints = layer.minimumCores; + /* + * Now apply the primaryService values to the layer. + */ + layer.isThreadable = primaryService.threadable; + layer.maximumCores = primaryService.maxCores; + layer.minimumCores = primaryService.minCores; + layer.minimumMemory = primaryService.minMemory; + layer.maximumGpus = primaryService.maxGpus; + layer.minimumGpus = primaryService.minGpus; + layer.minimumGpuMemory = primaryService.minGpuMemory; + layer.tags.addAll(primaryService.tags); + layer.services.addAll(services); + layer.limits.addAll(limits); + layer.timeout = primaryService.timeout; + layer.timeout_llu = primaryService.timeout_llu; + } + + private void determineOutputs(Element layerTag, BuildableJob job, LayerDetail layer) { + + Element t_outputs = layerTag.getChild("outputs"); + List outputs = new ArrayList(); + /* + * Build a list of outputs from the XML. Filter out duplicates and empty outputs. + */ + if (t_outputs != null) { + for (Object tmp : t_outputs.getChildren()) { + Element t_output = (Element) tmp; + String output_path = t_output.getTextTrim(); - if (cores.contains(".")) { - if (cores.contains("-")) { - corePoints = (int) (Double.valueOf(cores) * 100 - .5); - } else { - corePoints = (int) (Double.valueOf(cores) * 100 + .5); - } - } else { - corePoints = Integer.valueOf(cores); + if (output_path.length() == 0) { + continue; } - if (corePoints > 0 && corePoints < Dispatcher.CORE_POINTS_RESERVED_MIN) { - corePoints = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + if (outputs.contains(output_path)) { + continue; } - else if (corePoints > Dispatcher.CORE_POINTS_RESERVED_MAX) { - corePoints = Dispatcher.CORE_POINTS_RESERVED_MAX; - } - - layer.minimumCores = corePoints; + outputs.add(output_path); + } } + layer.outputs.addAll(outputs); + } - /** - * Gpu is a int. - * - * If no gpu value is specified, we default to the value of - * Dispatcher.GPU_RESERVED_DEFAULT - */ - private void determineMinimumGpus(Element layerTag, LayerDetail layer) { + /** + * Converts the job space tagging format into a set of strings. Also verifies each tag. 
+ * + * @param job + * @param layer + * @return + */ + private void determineTags(BuildableJob job, LayerDetail layer, Element layerTag) { + Set newTags = new LinkedHashSet(); + String tags = layerTag.getChildTextTrim("tags"); - String gpus = layerTag.getChildTextTrim("gpus"); - if (gpus != null) { - layer.minimumGpus = Integer.valueOf(gpus); - } + if (tags == null) { + return; } - private void determineChunkSize(Element layerTag, LayerDetail layer) { - layer.chunkSize = Integer.parseInt(layerTag.getChildTextTrim("chunk")); + if (tags.length() == 0) { + return; } - /** - * Determine if the layer is threadable. A manually set threadable - * option in the job spec should override the service defaults. - * - * @param layerTag - * @param layer - */ - private void determineThreadable(Element layerTag, LayerDetail layer) { - // Must have at least 1 core to thread. - if (layer.minimumCores > 0 && layer.minimumCores < 100) { - layer.isThreadable = false; - } - else if (layerTag.getChildTextTrim("threadable") != null) { - layer.isThreadable = Convert.stringToBool( - layerTag.getChildTextTrim("threadable")); - } + String[] e = tags.replaceAll(" ", "").split("\\|"); + for (String s : e) { + if (e.length == 0) { + continue; + } + Matcher matcher = NAME_PATTERN.matcher(s); + if (!matcher.matches()) { + throw new SpecBuilderException("error, invalid tag " + s + + ", tags must be alpha numberic and at least " + "3 characters in length."); + } + newTags.add(s); } - private void determineResourceDefaults(Element layerTag, - BuildableJob job, LayerDetail layer) { - - Element t_services = layerTag.getChild("services"); - List services = new ArrayList(); - - /* - * Build a list of services from the XML. Filter - * out duplicates and empty services. - */ - if (t_services != null) { - - for (Object tmp : t_services.getChildren()) { - Element t_service = (Element) tmp; - String service_name = t_service.getTextTrim(); - - if (service_name.length() == 0) { - continue; - } - - if (services.contains(service_name)) { - continue; - } - services.add(service_name); - } - } - - /* - * Start from the beginning and check each service. The first - * one that has a service record will be the one to use. - */ - ServiceEntity primaryService = null; - for (String service_name: services) { - try { - primaryService = serviceManager.getService(service_name, - job.detail.showName); - // Once a service is found, break; - break; - } catch (EmptyResultDataAccessException e) { - logger.warn("warning, service not found for layer " + - layer.getName() + " " + service_name); - } - } - - /* - * If no primary service was found, use the default service. 
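As a reading aid for determineTags above: the tags element is pipe-delimited, spaces are stripped, and every token must match NAME_PATTERN, which per the error text requires alphanumeric names of at least three characters. A small sketch under that assumption; the pattern below is only an assumed shape, since the real NAME_PATTERN is defined elsewhere in this class:

    import java.util.LinkedHashSet;
    import java.util.Set;
    import java.util.regex.Pattern;

    final class TagParseSketch {
        // Assumed shape of NAME_PATTERN, based on the error message above.
        private static final Pattern NAME_PATTERN = Pattern.compile("^[a-zA-Z0-9_]{3,}$");

        static Set<String> parseTags(String tags) {
            Set<String> out = new LinkedHashSet<>();
            for (String token : tags.replaceAll(" ", "").split("\\|")) {
                if (token.isEmpty()) {
                    continue; // skip empty tokens, e.g. from "a||b"
                }
                if (!NAME_PATTERN.matcher(token).matches()) {
                    throw new IllegalArgumentException("invalid tag: " + token);
                }
                out.add(token);
            }
            return out;
        }

        public static void main(String[] args) {
            System.out.println(parseTags("general | desktop | nuke")); // [general, desktop, nuke]
        }
    }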
- */ - if (primaryService == null) { - primaryService = serviceManager.getService(DEFAULT_SERVICE); - services.add(primaryService.name); - } - - Element t_limits = layerTag.getChild("limits"); - List limits = new ArrayList(); - - if (t_limits != null) { - for (Object tmp : t_limits.getChildren()) { - Element t_limit = (Element) tmp; - String limitName = t_limit.getTextTrim(); - - if (limitName.length() == 0) { - continue; - } + if (newTags.size() > 0) { + layer.tags = newTags; + } + } - if (limits.contains(limitName)) { - continue; - } - limits.add(limitName); - } - } + /** + * Determine the frame range + * + * @param range + * @param chunkSize + * @return + */ + public int getFrameRangeSize(String range, int chunkSize) { + try { + return CueUtil.normalizeFrameRange(range, chunkSize).size(); + } catch (Exception e) { + throw new SpecBuilderException("error, the range " + range + " is invalid"); + } + } + private BuildableDependency handleDependTag(Element tag) { - logger.info("primary service: " + primaryService.getName() + " " + - layer.getName()); - - /* - * Now apply the primaryService values to the layer. - */ - layer.isThreadable = primaryService.threadable; - layer.maximumCores = primaryService.maxCores; - layer.minimumCores = primaryService.minCores; - layer.minimumMemory = primaryService.minMemory; - layer.maximumGpus = primaryService.maxGpus; - layer.minimumGpus = primaryService.minGpus; - layer.minimumGpuMemory = primaryService.minGpuMemory; - layer.tags.addAll(primaryService.tags); - layer.services.addAll(services); - layer.limits.addAll(limits); - layer.timeout = primaryService.timeout; - layer.timeout_llu = primaryService.timeout_llu; - } + BuildableDependency depend = new BuildableDependency(); + depend.type = DependType.valueOf(tag.getAttributeValue("type").toUpperCase()); - private void determineOutputs(Element layerTag, - BuildableJob job, LayerDetail layer) { - - Element t_outputs = layerTag.getChild("outputs"); - List outputs = new ArrayList(); - /* - * Build a list of outputs from the XML. Filter - * out duplicates and empty outputs. - */ - if (t_outputs != null) { - for (Object tmp : t_outputs.getChildren()) { - Element t_output = (Element) tmp; - String output_path = t_output.getTextTrim(); - - if (output_path.length() == 0) { - continue; - } - - if (outputs.contains(output_path)) { - continue; - } - outputs.add(output_path); - } - } - layer.outputs.addAll(outputs); + /* + * If the depend type is layer on layer, allow dependAny to be set. Depend any is not + * implemented for any other depend type. + */ + if (depend.type.equals(DependType.LAYER_ON_LAYER)) { + depend.anyFrame = Convert.stringToBool(tag.getAttributeValue("anyframe")); } - /** - * Converts the job space tagging format into a set of strings. Also - * verifies each tag. 
- * - * @param job - * @param layer - * @return + /* + * Set job names */ - private void determineTags(BuildableJob job, LayerDetail layer, - Element layerTag) { - Set newTags = new LinkedHashSet(); - String tags = layerTag.getChildTextTrim("tags"); - - if (tags == null) { - return; - } + depend.setDependErJobName(conformJobName(tag.getChildTextTrim("depjob"))); + depend.setDependOnJobName(conformJobName(tag.getChildTextTrim("onjob"))); - if (tags.length() == 0) { - return; - } - - String[] e = tags.replaceAll(" ", "").split("\\|"); - for (String s : e) { - if (e.length == 0) { - continue; - } - Matcher matcher = NAME_PATTERN.matcher(s); - if (!matcher.matches()) { - throw new SpecBuilderException("error, invalid tag " + s - + ", tags must be alpha numberic and at least " - + "3 characters in length."); - } - newTags.add(s); - } + /* + * Set layer names + */ + String depLayer = tag.getChildTextTrim("deplayer"); + String onLayer = tag.getChildTextTrim("onlayer"); - if (newTags.size() > 0) { - layer.tags = newTags; - } + if (depLayer != null) { + depend.setDependErLayerName(conformLayerName(depLayer)); + } + if (onLayer != null) { + depend.setDependOnLayerName(conformLayerName(onLayer)); } - /** - * Determine the frame range - * - * @param range - * @param chunkSize - * @return + /* + * Set frame names */ - public int getFrameRangeSize(String range, int chunkSize) { + String depFrame = tag.getChildTextTrim("depframe"); + String onFrame = tag.getChildTextTrim("onframe"); + + if (depFrame != null) { + depFrame = conformFrameName(depFrame); + depend.setDependErFrameName(depFrame); + } + if (onFrame != null) { + onFrame = conformFrameName(onFrame); + depend.setDependOnFrameName(onFrame); + } + + // double check to make sure we don't have two of the same frame/ + if (onFrame != null && depFrame != null) { + if (onFrame.equals(depFrame)) { + throw new SpecBuilderException("The frame name: " + depFrame + " cannot depend on itself."); + } + } + + return depend; + } + + /** + * Tags a env tag and populates the supplied map with key value pairs. + * + * @param tag + * @param map + */ + private void handleEnvironmentTag(Element tag, Map map) { + if (tag == null) { + return; + } + for (Object tmp : tag.getChildren()) { + Element envTag = (Element) tmp; + String key = envTag.getAttributeValue("name"); + if (key == null) { + continue; + } + map.put(key, envTag.getTextTrim()); + } + } + + public void parse(File file) { + try { + doc = new SAXBuilder(true).build(file); + + } catch (Exception e) { + throw new SpecBuilderException("Failed to parse job spec XML, " + e); + } + + handleSpecTag(); + handleJobsTag(); + handleDependsTags(); + } + + private class DTDRedirector implements EntityResolver { + public InputSource resolveEntity(String publicId, String systemId) + throws SAXException, IOException { + if (systemId.startsWith(SPCUE_DTD_URL)) { + // Redirect to resource file. try { - return CueUtil.normalizeFrameRange(range, chunkSize).size(); + String filename = systemId.substring(SPCUE_DTD_URL.length()); + InputStream dtd = getClass().getResourceAsStream("/public/dtd/" + filename); + return new InputSource(dtd); } catch (Exception e) { - throw new SpecBuilderException("error, the range " + range - + " is invalid"); + throw new SpecBuilderException("Failed to redirect DTD " + systemId + ", " + e); } + } else { + // Use default resolver. 
+ return null; + } } + } - private BuildableDependency handleDependTag(Element tag) { - - BuildableDependency depend = new BuildableDependency(); - depend.type = DependType.valueOf(tag.getAttributeValue("type").toUpperCase()); - - /* - * If the depend type is layer on layer, allow dependAny to be set. - * Depend any is not implemented for any other depend type. - */ - if (depend.type.equals(DependType.LAYER_ON_LAYER)) { - depend.anyFrame = Convert.stringToBool(tag - .getAttributeValue("anyframe")); - } - - /* - * Set job names - */ - depend - .setDependErJobName(conformJobName(tag - .getChildTextTrim("depjob"))); - depend - .setDependOnJobName(conformJobName(tag - .getChildTextTrim("onjob"))); - - /* - * Set layer names - */ - String depLayer = tag.getChildTextTrim("deplayer"); - String onLayer = tag.getChildTextTrim("onlayer"); - - if (depLayer != null) { - depend.setDependErLayerName(conformLayerName(depLayer)); - } - if (onLayer != null) { - depend.setDependOnLayerName(conformLayerName(onLayer)); - } - - /* - * Set frame names - */ - String depFrame = tag.getChildTextTrim("depframe"); - String onFrame = tag.getChildTextTrim("onframe"); - - if (depFrame != null) { - depFrame = conformFrameName(depFrame); - depend.setDependErFrameName(depFrame); - } - if (onFrame != null) { - onFrame = conformFrameName(onFrame); - depend.setDependOnFrameName(onFrame); - } + public void parse(String cjsl) { + try { + SAXBuilder builder = new SAXBuilder(true); + builder.setEntityResolver(new DTDRedirector()); + doc = builder.build(new StringReader(cjsl)); - // double check to make sure we don't have two of the same frame/ - if (onFrame != null && depFrame != null) { - if (onFrame.equals(depFrame)) { - throw new SpecBuilderException("The frame name: " + depFrame - + " cannot depend on itself."); - } - } - - return depend; + } catch (Exception e) { + throw new SpecBuilderException("Failed to parse job spec XML, " + e); } - /** - * Tags a env tag and populates the supplied map with key value pairs. - * - * @param tag - * @param map - */ - private void handleEnvironmentTag(Element tag, Map map) { - if (tag == null) { - return; - } - for (Object tmp : tag.getChildren()) { - Element envTag = (Element) tmp; - String key = envTag.getAttributeValue("name"); - if (key == null) { - continue; - } - map.put(key, envTag.getTextTrim()); - } - } + handleSpecTag(); + handleJobsTag(); + handleDependsTags(); + } - public void parse(File file) { - try { - doc = new SAXBuilder(true).build(file); + private BuildableJob initPostJob(BuildableJob parent) { - } catch (Exception e) { - throw new SpecBuilderException("Failed to parse job spec XML, " + e); - } + JobDetail job = new JobDetail(); + job.name = parent.detail.name + "_post_job_" + System.currentTimeMillis(); + job.name = job.name.replace(user, "monitor"); + job.state = JobState.STARTUP; + job.isPaused = false; + job.maxCoreUnits = 500; + job.startTime = CueUtil.getTime(); + job.maxRetries = 2; + job.shot = shot; + job.user = "monitor"; + job.uid = uid; + job.email = null; + job.os = parent.detail.os; - handleSpecTag(); - handleJobsTag(); - handleDependsTags(); - } + job.showName = show; + job.facilityName = facility; + job.deptName = parent.detail.deptName; - private class DTDRedirector implements EntityResolver { - public InputSource resolveEntity(String publicId, - String systemId) throws SAXException, IOException { - if (systemId.startsWith(SPCUE_DTD_URL)) { - // Redirect to resource file. 
- try { - String filename = systemId.substring(SPCUE_DTD_URL.length()); - InputStream dtd = getClass().getResourceAsStream("/public/dtd/" + filename); - return new InputSource(dtd); - } catch (Exception e) { - throw new SpecBuilderException("Failed to redirect DTD " + systemId + ", " + e); - } - } else { - // Use default resolver. - return null; - } - } - } + BuildableJob postJob = new BuildableJob(job); - public void parse(String cjsl) { - try { - SAXBuilder builder = new SAXBuilder(true); - builder.setEntityResolver(new DTDRedirector()); - doc = builder.build(new StringReader(cjsl)); - - } catch (Exception e) { - throw new SpecBuilderException("Failed to parse job spec XML, " + e); - } - - handleSpecTag(); - handleJobsTag(); - handleDependsTags(); + for (String key : parent.env.keySet()) { + postJob.env.put(key, parent.env.get(key)); } - private BuildableJob initPostJob(BuildableJob parent) { - - JobDetail job = new JobDetail(); - job.name = parent.detail.name + "_post_job_" - + System.currentTimeMillis(); - job.name = job.name.replace(user, "monitor"); - job.state = JobState.STARTUP; - job.isPaused = false; - job.maxCoreUnits = 500; - job.startTime = CueUtil.getTime(); - job.maxRetries = 2; - job.shot = shot; - job.user = "monitor"; - job.uid = uid; - job.email = null; - job.os = parent.detail.os; - - job.showName = show; - job.facilityName = facility; - job.deptName = parent.detail.deptName; - - BuildableJob postJob = new BuildableJob(job); - - for (String key : parent.env.keySet()) { - postJob.env.put(key, parent.env.get(key)); - } - - return postJob; - } + return postJob; + } - public Document getDoc() { - return doc; - } + public Document getDoc() { + return doc; + } - public List getDepends() { - return depends; - } + public List getDepends() { + return depends; + } - public List getJobs() { - return jobs; - } + public List getJobs() { + return jobs; + } - public String getShot() { - return shot; - } + public String getShot() { + return shot; + } - public String getShow() { - return show; - } + public String getShow() { + return show; + } - public Optional getUid() { - return uid; - } + public Optional getUid() { + return uid; + } - public String getUser() { - return user; - } + public String getUser() { + return user; + } - public ServiceManager getServiceManager() { - return serviceManager; - } + public ServiceManager getServiceManager() { + return serviceManager; + } - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java index 7bf7db136..efaecc4f5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import org.apache.logging.log4j.Logger; @@ -32,157 +28,149 @@ import com.imageworks.spcue.dispatcher.LocalDispatcher; import com.imageworks.spcue.grpc.host.LockState; - /** * Non transactional class for handling local booking logic. */ public class LocalBookingSupport { - private static final Logger logger = LogManager.getLogger(LocalBookingSupport.class); + private static final Logger logger = LogManager.getLogger(LocalBookingSupport.class); - private HostManager hostManager; - private LocalDispatcher localDispatcher; - private OwnerManager ownerManager; - private BookingManager bookingManager; + private HostManager hostManager; + private LocalDispatcher localDispatcher; + private OwnerManager ownerManager; + private BookingManager bookingManager; - public boolean bookLocal(JobInterface job, String hostname, String user, - LocalHostAssignment lha) { + public boolean bookLocal(JobInterface job, String hostname, String user, + LocalHostAssignment lha) { - logger.info("Setting up local booking for " + user + " on " + job); + logger.info("Setting up local booking for " + user + " on " + job); - DispatchHost host = hostManager.findDispatchHost(hostname); - if (host.lockState.equals(LockState.OPEN)) { - throw new SpcueRuntimeException( - "The host "+ host + " is not NIMBY locked"); - } + DispatchHost host = hostManager.findDispatchHost(hostname); + if (host.lockState.equals(LockState.OPEN)) { + throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); + } - OwnerEntity owner = ownerManager.findOwner(user); - if (!ownerManager.isOwner(owner, host)) { - throw new SpcueRuntimeException(user - + " is not the owner of the host " + host.getName()); - } + OwnerEntity owner = ownerManager.findOwner(user); + if (!ownerManager.isOwner(owner, host)) { + throw new SpcueRuntimeException(user + " is not the owner of the host " + host.getName()); + } - bookingManager.createLocalHostAssignment(host, job, lha); + bookingManager.createLocalHostAssignment(host, job, lha); + + try { + if (localDispatcher.dispatchHost(host, job).size() > 0) { + return true; + } + } catch (Exception e) { + /* + * Eat everything here and we'll throw our own ice exception. + */ + logger.info("addRenderPartition to job " + job + " failed, " + e); + } - try { - if (localDispatcher.dispatchHost(host, job).size() > 0) { - return true; - } - } catch (Exception e) { - /* - * Eat everything here and we'll throw our own ice exception. 
- */ - logger.info("addRenderPartition to job " + job + " failed, " + e); - } + logger.info("bookLocal failed to book " + host + " to " + job + + ", there were no suitable frames to book."); - logger.info("bookLocal failed to book " + host + " to " + job - + ", there were no suitable frames to book."); + return false; + } - return false; - } + public boolean bookLocal(LayerInterface layer, String hostname, String user, + LocalHostAssignment lha) { - public boolean bookLocal(LayerInterface layer, String hostname, String user, - LocalHostAssignment lha) { + logger.info("Setting up local booking for " + user + " on " + layer); - logger.info("Setting up local booking for " + user + " on " + layer); + DispatchHost host = hostManager.findDispatchHost(hostname); + if (host.lockState.equals(LockState.OPEN)) { + throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); + } - DispatchHost host = hostManager.findDispatchHost(hostname); - if (host.lockState.equals(LockState.OPEN)) { - throw new SpcueRuntimeException("The host " + host - + " is not NIMBY locked"); - } + OwnerEntity owner = ownerManager.findOwner(user); + if (!ownerManager.isOwner(owner, host)) { + throw new SpcueRuntimeException(user + " is not the owner of the host " + host.getName()); + } - OwnerEntity owner = ownerManager.findOwner(user); - if (!ownerManager.isOwner(owner, host)) { - throw new SpcueRuntimeException(user - + " is not the owner of the host " + host.getName()); - } + bookingManager.createLocalHostAssignment(host, layer, lha); + + try { + if (localDispatcher.dispatchHost(host, layer).size() > 0) { + return true; + } + } catch (Exception e) { + /* + * Eat everything here and we'll throw our own ice exception. + */ + logger.info("addRenderPartition to job " + layer + " failed, " + e); + } - bookingManager.createLocalHostAssignment(host, layer, lha); + logger.info("bookLocafailed to book " + host + " to " + layer + + ", there were no suitable frames to book."); - try { - if (localDispatcher.dispatchHost(host, layer).size() > 0) { - return true; - } - } catch (Exception e) { - /* - * Eat everything here and we'll throw our own ice exception. - */ - logger.info("addRenderPartition to job " + layer + " failed, " + e); - } + return false; - logger.info("bookLocafailed to book " + host + " to " + layer - + ", there were no suitable frames to book."); + } - return false; + public boolean bookLocal(FrameInterface frame, String hostname, String user, + LocalHostAssignment lha) { - } + logger.info("Setting up local booking for " + user + " on " + frame); - public boolean bookLocal(FrameInterface frame, String hostname, String user, - LocalHostAssignment lha) { - - logger.info("Setting up local booking for " + user + " on " + frame); - - DispatchHost host = hostManager.findDispatchHost(hostname); - if (host.lockState.equals(LockState.OPEN)) { - throw new SpcueRuntimeException("The host " + host - + " is not NIMBY locked"); - } - - OwnerEntity owner = ownerManager.findOwner(user); - if (!ownerManager.isOwner(owner, host)) { - throw new SpcueRuntimeException(user - + " is not the owner of the host " + host.getName()); - } - - bookingManager.createLocalHostAssignment(host, frame, lha); - try { - if (localDispatcher.dispatchHost(host, frame).size() > 0) { - return true; - } - } catch (Exception e) { - /* - * Eat everything here and we'll throw our own ice exception. 
- */ - logger.info("addRenderPartition to job " + frame + " failed, " + e); - } - - logger.info("bookLocafailed to book " + host + " to " + frame - + ", there were no suitable frames to book."); - - return false; + DispatchHost host = hostManager.findDispatchHost(hostname); + if (host.lockState.equals(LockState.OPEN)) { + throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); } - public HostManager getHostManager() { - return hostManager; + OwnerEntity owner = ownerManager.findOwner(user); + if (!ownerManager.isOwner(owner, host)) { + throw new SpcueRuntimeException(user + " is not the owner of the host " + host.getName()); } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; + bookingManager.createLocalHostAssignment(host, frame, lha); + try { + if (localDispatcher.dispatchHost(host, frame).size() > 0) { + return true; + } + } catch (Exception e) { + /* + * Eat everything here and we'll throw our own ice exception. + */ + logger.info("addRenderPartition to job " + frame + " failed, " + e); } - public LocalDispatcher getLocalDispatcher() { - return localDispatcher; - } + logger.info("bookLocafailed to book " + host + " to " + frame + + ", there were no suitable frames to book."); - public void setLocalDispatcher(LocalDispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } + return false; + } - public OwnerManager getOwnerManager() { - return ownerManager; - } + public HostManager getHostManager() { + return hostManager; + } - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } - public BookingManager getBookingManager() { - return bookingManager; - } + public LocalDispatcher getLocalDispatcher() { + return localDispatcher; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } -} + public void setLocalDispatcher(LocalDispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } + + public OwnerManager getOwnerManager() { + return ownerManager; + } + + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = ownerManager; + } + public BookingManager getBookingManager() { + return bookingManager; + } + + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java index 64c6fce4e..f51b48a18 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -46,245 +42,243 @@ public class MaintenanceManagerSupport { - private static final Logger logger = LogManager.getLogger(MaintenanceManagerSupport.class); - - @Autowired - private Environment env; - - private MaintenanceDao maintenanceDao; - - private ProcDao procDao; - - private FrameDao frameDao; - - private HostDao hostDao; - - private JobManager jobManager; - - private DispatchSupport dispatchSupport; - - private HistoricalSupport historicalSupport; - - private DepartmentManager departmentManager; - - private static final long WAIT_FOR_HOST_REPORTS_MS = 600000; - - private static final int CHECKPOINT_MAX_WAIT_SEC = 300; - - private long dbConnectionFailureTime = 0; - - /** - * Checks the cue for down hosts. If there are any down they are cleared of - * procs. Additionally the orphaned proc check is done. - * - * If a DB Connection exception is thrown, its caught and the current time - * is noted. Once the DB comes back up, down proc checks will not resume for - * WAIT_FOR_HOST_REPORTS_MS milliseconds. This is to give procs a chance to - * report back in. - * - */ - public void checkHardwareState() { - try { - - if (!maintenanceDao - .lockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK)) { - return; - } - try { - if (dbConnectionFailureTime > 0) { - if (System.currentTimeMillis() - dbConnectionFailureTime < WAIT_FOR_HOST_REPORTS_MS) { - logger.warn("NOT running checkHardwareState, waiting for hosts to report in."); - return; - } - dbConnectionFailureTime = 0; - } - - int hosts = maintenanceDao.setUpHostsToDown(); - if (hosts > 0) { - clearDownProcs(); - - boolean autoDeleteDownHosts = env.getProperty( - "maintenance.auto_delete_down_hosts", Boolean.class, false); - if (autoDeleteDownHosts) { - hostDao.deleteDownHosts(); - } - } - clearOrphanedProcs(); - } finally { - maintenanceDao - .unlockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK); - } - } catch (Exception e) { - // This catch could be more specific using CannotGetJdbcConnectionException, but we need - // to catch a wider range of exceptions from HikariPool. - // HikariPool will log this message very frequently with error level, the following check - // avoids polluting the logs by logging it twice - if (!e.getMessage().contains("Exception during pool initialization")) { - logger.warn("Error obtaining DB connection for hardware state check", e); - } - // If this fails, then the network went down, set the current time. 
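For orientation, checkHardwareState above backs off after a database connection failure: the failure time is recorded, and hardware-state checks are skipped until WAIT_FOR_HOST_REPORTS_MS (600000 ms, ten minutes) has elapsed, giving hosts a chance to report back in. A minimal sketch of that guard with hypothetical method names:

    final class DbBackoffSketch {
        private static final long WAIT_FOR_HOST_REPORTS_MS = 600000; // same value as the class above
        private long dbConnectionFailureTime = 0;

        // Returns true while the post-failure grace window is still open.
        boolean waitingForHosts(long nowMillis) {
            if (dbConnectionFailureTime > 0) {
                if (nowMillis - dbConnectionFailureTime < WAIT_FOR_HOST_REPORTS_MS) {
                    return true;
                }
                dbConnectionFailureTime = 0; // window elapsed; resume checks
            }
            return false;
        }

        void recordFailure(long nowMillis) {
            dbConnectionFailureTime = nowMillis;
        }

        public static void main(String[] args) {
            DbBackoffSketch sketch = new DbBackoffSketch();
            sketch.recordFailure(System.currentTimeMillis());
            System.out.println(sketch.waitingForHosts(System.currentTimeMillis())); // true
        }
    }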
- dbConnectionFailureTime = System.currentTimeMillis(); - } - } + private static final Logger logger = LogManager.getLogger(MaintenanceManagerSupport.class); + + @Autowired + private Environment env; + + private MaintenanceDao maintenanceDao; + + private ProcDao procDao; + + private FrameDao frameDao; + + private HostDao hostDao; + + private JobManager jobManager; + + private DispatchSupport dispatchSupport; - public void archiveFinishedJobs() { - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)) { + private HistoricalSupport historicalSupport; + + private DepartmentManager departmentManager; + + private static final long WAIT_FOR_HOST_REPORTS_MS = 600000; + + private static final int CHECKPOINT_MAX_WAIT_SEC = 300; + + private long dbConnectionFailureTime = 0; + + /** + * Checks the cue for down hosts. If there are any down they are cleared of procs. Additionally + * the orphaned proc check is done. + * + * If a DB Connection exception is thrown, its caught and the current time is noted. Once the DB + * comes back up, down proc checks will not resume for WAIT_FOR_HOST_REPORTS_MS milliseconds. This + * is to give procs a chance to report back in. + * + */ + public void checkHardwareState() { + try { + + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK)) { + return; + } + try { + if (dbConnectionFailureTime > 0) { + if (System.currentTimeMillis() - dbConnectionFailureTime < WAIT_FOR_HOST_REPORTS_MS) { + logger.warn("NOT running checkHardwareState, waiting for hosts to report in."); return; + } + dbConnectionFailureTime = 0; } - try { - historicalSupport.archiveHistoricalJobData(); - } catch (Exception e) { - logger.warn("failed to archive finished jobs: " + e); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); - } - } - private void clearOrphanedProcs() { - List procs = procDao.findOrphanedVirtualProcs(100); - for (VirtualProc proc: procs) { - try { - dispatchSupport.lostProc(proc, - "Removed by maintenance, orphaned", - Dispatcher.EXIT_STATUS_FRAME_ORPHAN); - - Sentry.configureScope(scope -> { - scope.setExtra("frame_id", proc.getFrameId()); - scope.setExtra("host_id", proc.getHostId()); - scope.setExtra("name", proc.getName()); - Sentry.captureMessage("Manager cleaning orphan procs"); - }); - } catch (Exception e) { - logger.info("failed to clear orphaned proc: " + proc.getName() + " " + e); - } - } + int hosts = maintenanceDao.setUpHostsToDown(); + if (hosts > 0) { + clearDownProcs(); - List frames = frameDao.getOrphanedFrames(); - for (FrameInterface frame: frames) { - try { - frameDao.updateFrameStopped(frame, FrameState.WAITING, - Dispatcher.EXIT_STATUS_FRAME_ORPHAN); - } catch (Exception e) { - logger.info("failed to clear orphaned frame: " + - frame.getName() + " " + e); - } + boolean autoDeleteDownHosts = + env.getProperty("maintenance.auto_delete_down_hosts", Boolean.class, false); + if (autoDeleteDownHosts) { + hostDao.deleteDownHosts(); + } } + clearOrphanedProcs(); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK); + } + } catch (Exception e) { + // This catch could be more specific using CannotGetJdbcConnectionException, but + // we need + // to catch a wider range of exceptions from HikariPool. 
+ // HikariPool will log this message very frequently with error level, the + // following check + // avoids polluting the logs by logging it twice + if (!e.getMessage().contains("Exception during pool initialization")) { + logger.warn("Error obtaining DB connection for hardware state check", e); + } + // If this fails, then the network went down, set the current time. + dbConnectionFailureTime = System.currentTimeMillis(); } + } - private void clearDownProcs() { - List procs = procDao.findVirtualProcs(HardwareState.DOWN); - logger.warn("found " + procs.size() + " that are down."); - for (VirtualProc proc: procs) { - try { - dispatchSupport.lostProc(proc, - proc.getName() + " was marked as down.", - Dispatcher.EXIT_STATUS_DOWN_HOST); - FrameInterface f = frameDao.getFrame(proc.frameId); - FrameDetail frameDetail = frameDao.getFrameDetail(f); - Sentry.configureScope(scope -> { - scope.setExtra("host", proc.getName()); - scope.setExtra("procId", proc.getProcId()); - scope.setExtra("frame Name", frameDetail.getName()); - scope.setExtra("frame Exit Status", String.valueOf(frameDetail.exitStatus)); - scope.setExtra("Frame Job ID", frameDetail.getJobId()); - Sentry.captureMessage("MaintenanceManager proc removed due to host offline"); - }); - } catch (Exception e) { - logger.info("failed to down proc: " + proc.getName() + " " + e); - } - } + public void archiveFinishedJobs() { + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)) { + return; } - - public void clearStaleCheckpoints() { - logger.info("Checking for stale checkpoint frames."); - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT)) { return; } - try { - List frames = jobManager.getStaleCheckpoints(CHECKPOINT_MAX_WAIT_SEC); - logger.warn("found " + frames.size() + " frames that failed to checkpoint"); - for (FrameInterface frame: frames) { - jobManager.updateCheckpointState(frame, CheckpointState.DISABLED); - jobManager.updateFrameState(frame, FrameState.WAITING); - } - } catch (Exception e) { - logger.warn("failed to unlock stale checkpoint " + e); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT); - } + try { + historicalSupport.archiveHistoricalJobData(); + } catch (Exception e) { + logger.warn("failed to archive finished jobs: " + e); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); } - - public void updateTaskValues() { - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_TASK_UPDATE, 700)) { return; } - try { - logger.info("running task updates"); - for (PointDetail pd: departmentManager.getManagedPointConfs()) { - departmentManager.updateManagedTasks(pd); - } - } catch (Exception e) { - logger.warn("failed to archive finished jobs: " + e); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_TASK_UPDATE); - } + } + + private void clearOrphanedProcs() { + List procs = procDao.findOrphanedVirtualProcs(100); + for (VirtualProc proc : procs) { + try { + dispatchSupport.lostProc(proc, "Removed by maintenance, orphaned", + Dispatcher.EXIT_STATUS_FRAME_ORPHAN); + + Sentry.configureScope(scope -> { + scope.setExtra("frame_id", proc.getFrameId()); + scope.setExtra("host_id", proc.getHostId()); + scope.setExtra("name", proc.getName()); + Sentry.captureMessage("Manager cleaning orphan procs"); + }); + } catch (Exception e) { + logger.info("failed to clear orphaned proc: " + proc.getName() + " " + e); + } } - public FrameDao getFrameDao() { - return frameDao; + List frames = frameDao.getOrphanedFrames(); + for (FrameInterface 
frame : frames) { + try { + frameDao.updateFrameStopped(frame, FrameState.WAITING, Dispatcher.EXIT_STATUS_FRAME_ORPHAN); + } catch (Exception e) { + logger.info("failed to clear orphaned frame: " + frame.getName() + " " + e); + } } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; + } + + private void clearDownProcs() { + List procs = procDao.findVirtualProcs(HardwareState.DOWN); + logger.warn("found " + procs.size() + " that are down."); + for (VirtualProc proc : procs) { + try { + dispatchSupport.lostProc(proc, proc.getName() + " was marked as down.", + Dispatcher.EXIT_STATUS_DOWN_HOST); + FrameInterface f = frameDao.getFrame(proc.frameId); + FrameDetail frameDetail = frameDao.getFrameDetail(f); + Sentry.configureScope(scope -> { + scope.setExtra("host", proc.getName()); + scope.setExtra("procId", proc.getProcId()); + scope.setExtra("frame Name", frameDetail.getName()); + scope.setExtra("frame Exit Status", String.valueOf(frameDetail.exitStatus)); + scope.setExtra("Frame Job ID", frameDetail.getJobId()); + Sentry.captureMessage("MaintenanceManager proc removed due to host offline"); + }); + } catch (Exception e) { + logger.info("failed to down proc: " + proc.getName() + " " + e); + } } + } - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; + public void clearStaleCheckpoints() { + logger.info("Checking for stale checkpoint frames."); + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT)) { + return; } - - public DispatchSupport getDispatchSupport() { - return dispatchSupport; + try { + List frames = jobManager.getStaleCheckpoints(CHECKPOINT_MAX_WAIT_SEC); + logger.warn("found " + frames.size() + " frames that failed to checkpoint"); + for (FrameInterface frame : frames) { + jobManager.updateCheckpointState(frame, CheckpointState.DISABLED); + jobManager.updateFrameState(frame, FrameState.WAITING); + } + } catch (Exception e) { + logger.warn("failed to unlock stale checkpoint " + e); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT); } + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; + public void updateTaskValues() { + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_TASK_UPDATE, 700)) { + return; } - - public MaintenanceDao getMaintenanceDao() { - return maintenanceDao; + try { + logger.info("running task updates"); + for (PointDetail pd : departmentManager.getManagedPointConfs()) { + departmentManager.updateManagedTasks(pd); + } + } catch (Exception e) { + logger.warn("failed to archive finished jobs: " + e); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_TASK_UPDATE); } + } - public void setMaintenanceDao(MaintenanceDao maintenanceDao) { - this.maintenanceDao = maintenanceDao; - } + public FrameDao getFrameDao() { + return frameDao; + } - public ProcDao getProcDao() { - return procDao; - } + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } - public HistoricalSupport getHistoricalSupport() { - return historicalSupport; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setHistoricalSupport(HistoricalSupport historicalSupport) { - this.historicalSupport = historicalSupport; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public 
DepartmentManager getDepartmentManager() { - return departmentManager; - } + public MaintenanceDao getMaintenanceDao() { + return maintenanceDao; + } - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } + public void setMaintenanceDao(MaintenanceDao maintenanceDao) { + this.maintenanceDao = maintenanceDao; + } - public JobManager getJobManager() { - return jobManager; - } + public ProcDao getProcDao() { + return procDao; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } -} + public HistoricalSupport getHistoricalSupport() { + return historicalSupport; + } + + public void setHistoricalSupport(HistoricalSupport historicalSupport) { + this.historicalSupport = historicalSupport; + } + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java index d639405ee..aa8ac3a3f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import com.imageworks.spcue.DeedEntity; @@ -27,79 +23,77 @@ public interface OwnerManager { - /** - * Return true if the given users owns the particular host. - * - * @param owner - * @param host - * @return - */ - boolean isOwner(OwnerEntity owner, HostInterface host); - - /** - * Create a new owner. - * - * @param user - * @param email - */ - OwnerEntity createOwner(String user, ShowInterface show); - - /** - * Get an owner record by ID. - * - * @param id - */ - OwnerEntity getOwner(String id); - - /** - * Return an owner record by name. - * - * @param name - */ - OwnerEntity findOwner(String name); - - /** - * Delete the specified owner and all his/her deeds. - * Return true if the owner was actually deleted. - * False if not. 
- */ - boolean deleteOwner(Entity owner); - - /** - * Set the show of the given user. - * - * @param owner - * @param show - */ - void setShow(Entity owner, ShowInterface show); - - /** - * Assigns the given host to the owner. - * - * @param owner - * @param host - */ - DeedEntity takeOwnership(OwnerEntity owner, HostInterface host); - - /** - * - * @param id - * @return - */ - DeedEntity getDeed(String id); - - /** - * Deletes a deed for the specified host. - * - * @param host - */ - void removeDeed(HostInterface host); - - /** - * Remove the given deed. - * - * @param deed - */ - void removeDeed(DeedEntity deed); + /** + * Return true if the given users owns the particular host. + * + * @param owner + * @param host + * @return + */ + boolean isOwner(OwnerEntity owner, HostInterface host); + + /** + * Create a new owner. + * + * @param user + * @param email + */ + OwnerEntity createOwner(String user, ShowInterface show); + + /** + * Get an owner record by ID. + * + * @param id + */ + OwnerEntity getOwner(String id); + + /** + * Return an owner record by name. + * + * @param name + */ + OwnerEntity findOwner(String name); + + /** + * Delete the specified owner and all his/her deeds. Return true if the owner was actually + * deleted. False if not. + */ + boolean deleteOwner(Entity owner); + + /** + * Set the show of the given user. + * + * @param owner + * @param show + */ + void setShow(Entity owner, ShowInterface show); + + /** + * Assigns the given host to the owner. + * + * @param owner + * @param host + */ + DeedEntity takeOwnership(OwnerEntity owner, HostInterface host); + + /** + * + * @param id + * @return + */ + DeedEntity getDeed(String id); + + /** + * Deletes a deed for the specified host. + * + * @param host + */ + void removeDeed(HostInterface host); + + /** + * Remove the given deed. + * + * @param deed + */ + void removeDeed(DeedEntity deed); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java index d3fb63ee6..9bc87a565 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.service; import org.springframework.transaction.annotation.Transactional; @@ -34,90 +30,88 @@ @Transactional public class OwnerManagerService implements OwnerManager { - private OwnerDao ownerDao; - private DeedDao deedDao; - private HostDao hostDao; - - @Override - public OwnerEntity createOwner(String user, ShowInterface show) { - OwnerEntity owner = new OwnerEntity(user); - ownerDao.insertOwner(owner, show); - return owner; - } - - @Override - public boolean deleteOwner(Entity owner) { - return ownerDao.deleteOwner(owner); - } - - @Override - public OwnerEntity findOwner(String name) { - return ownerDao.findOwner(name); - } - - @Override - public OwnerEntity getOwner(String id) { - return ownerDao.getOwner(id); - } - - @Override - public void setShow(Entity owner, ShowInterface show) { - ownerDao.updateShow(owner, show); - } - - @Override - public DeedEntity getDeed(String id) { - return deedDao.getDeed(id); - } - - @Override - public DeedEntity takeOwnership(OwnerEntity owner, HostInterface host) { - if (!hostDao.isNimbyHost(host)) { - throw new SpcueRuntimeException( - "Cannot setup deeeds on non-NIMBY hosts."); - } - - deedDao.deleteDeed(host); - return deedDao.insertDeed(owner, host); - } - - @Override - public void removeDeed(HostInterface host) { - deedDao.deleteDeed(host); - } - - @Override - public void removeDeed(DeedEntity deed) { - deedDao.deleteDeed(deed); - } - - @Override - public boolean isOwner(OwnerEntity owner, HostInterface host) { - return ownerDao.isOwner(owner, host); - } - - public OwnerDao getOwnerDao() { - return ownerDao; - } - - public void setOwnerDao(OwnerDao ownerDao) { - this.ownerDao = ownerDao; - } - - public DeedDao getDeedDao() { - return deedDao; - } - - public void setDeedDao(DeedDao deedDao) { - this.deedDao = deedDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } + private OwnerDao ownerDao; + private DeedDao deedDao; + private HostDao hostDao; + + @Override + public OwnerEntity createOwner(String user, ShowInterface show) { + OwnerEntity owner = new OwnerEntity(user); + ownerDao.insertOwner(owner, show); + return owner; + } + + @Override + public boolean deleteOwner(Entity owner) { + return ownerDao.deleteOwner(owner); + } + + @Override + public OwnerEntity findOwner(String name) { + return ownerDao.findOwner(name); + } + + @Override + public OwnerEntity getOwner(String id) { + return ownerDao.getOwner(id); + } + + @Override + public void setShow(Entity owner, ShowInterface show) { + ownerDao.updateShow(owner, show); + } + + @Override + public DeedEntity getDeed(String id) { + return deedDao.getDeed(id); + } + + @Override + public DeedEntity takeOwnership(OwnerEntity owner, HostInterface host) { + if (!hostDao.isNimbyHost(host)) { + throw new SpcueRuntimeException("Cannot setup deeeds on non-NIMBY hosts."); + } + + deedDao.deleteDeed(host); + return deedDao.insertDeed(owner, host); + } + + @Override + public void removeDeed(HostInterface host) { + deedDao.deleteDeed(host); + } + + @Override + public void removeDeed(DeedEntity deed) { + deedDao.deleteDeed(deed); + } + + @Override + public boolean isOwner(OwnerEntity owner, HostInterface host) { + return ownerDao.isOwner(owner, host); + } + + public OwnerDao getOwnerDao() { + return ownerDao; + } + + public void setOwnerDao(OwnerDao ownerDao) { + this.ownerDao = ownerDao; + } + + public DeedDao getDeedDao() { + return deedDao; + } + + public void setDeedDao(DeedDao deedDao) { 
+ this.deedDao = deedDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java b/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java index 51bf4211d..a58341bf0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.service; import javax.annotation.Resource; @@ -34,102 +31,98 @@ import com.imageworks.spcue.Redirect; import com.imageworks.spcue.dao.RedirectDao; -@Transactional(isolation=Isolation.SERIALIZABLE, propagation=Propagation.REQUIRES_NEW) -public class RedirectService { - - private static final Logger logger = - LogManager.getLogger(RedirectService.class); - - @Resource - private PlatformTransactionManager txManager; - - private RedirectDao redirectDao; - - public RedirectService(RedirectDao redirectDao) { - this.redirectDao = redirectDao; - } - - /** - * Check for redirect existence. - * - * @param key Redirect key - * - * @return True if redirect exists - */ - @Transactional(readOnly = true) - public boolean containsKey(String key) { - return redirectDao.containsKey(key); - } - - /** - * Count redirects in a group. - * - * @param groupId the group to query - * - * @return count of redirects in group - */ - @Transactional(readOnly = true) - public int countRedirectsWithGroup(String groupId) { - return redirectDao.countRedirectsWithGroup(groupId); - } - - /** - * Delete all redirects that are past expiration age. - * - * @return count of redirects deleted - */ - public int deleteExpired() { - return redirectDao.deleteExpired(); - } - - /** - * Add redirect. 
- * - * @param key Redirect key - * - * @param r Redirect to add - */ - @Transactional(propagation=Propagation.NOT_SUPPORTED) - public void put(String key, Redirect r) { - DefaultTransactionDefinition def = new DefaultTransactionDefinition(); - def.setPropagationBehavior(DefaultTransactionDefinition.PROPAGATION_REQUIRES_NEW); - def.setIsolationLevel(DefaultTransactionDefinition.ISOLATION_SERIALIZABLE); - - while (true) { - TransactionStatus status = txManager.getTransaction(def); - try { - redirectDao.put(key, r); - } - catch (CannotSerializeTransactionException e) { - // MERGE statement race lost; try again. - txManager.rollback(status); - continue; - } - catch (DuplicateKeyException e) { - if (e.getMessage() != null && e.getMessage().contains("C_REDIRECT_PK")) { - // MERGE statement race lost; try again. - txManager.rollback(status); - continue; - } - throw e; - } - catch (Exception e) { - txManager.rollback(status); - throw e; - } - txManager.commit(status); - break; +@Transactional(isolation = Isolation.SERIALIZABLE, propagation = Propagation.REQUIRES_NEW) +public class RedirectService { + + private static final Logger logger = LogManager.getLogger(RedirectService.class); + + @Resource + private PlatformTransactionManager txManager; + + private RedirectDao redirectDao; + + public RedirectService(RedirectDao redirectDao) { + this.redirectDao = redirectDao; + } + + /** + * Check for redirect existence. + * + * @param key Redirect key + * + * @return True if redirect exists + */ + @Transactional(readOnly = true) + public boolean containsKey(String key) { + return redirectDao.containsKey(key); + } + + /** + * Count redirects in a group. + * + * @param groupId the group to query + * + * @return count of redirects in group + */ + @Transactional(readOnly = true) + public int countRedirectsWithGroup(String groupId) { + return redirectDao.countRedirectsWithGroup(groupId); + } + + /** + * Delete all redirects that are past expiration age. + * + * @return count of redirects deleted + */ + public int deleteExpired() { + return redirectDao.deleteExpired(); + } + + /** + * Add redirect. + * + * @param key Redirect key + * + * @param r Redirect to add + */ + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void put(String key, Redirect r) { + DefaultTransactionDefinition def = new DefaultTransactionDefinition(); + def.setPropagationBehavior(DefaultTransactionDefinition.PROPAGATION_REQUIRES_NEW); + def.setIsolationLevel(DefaultTransactionDefinition.ISOLATION_SERIALIZABLE); + + while (true) { + TransactionStatus status = txManager.getTransaction(def); + try { + redirectDao.put(key, r); + } catch (CannotSerializeTransactionException e) { + // MERGE statement race lost; try again. + txManager.rollback(status); + continue; + } catch (DuplicateKeyException e) { + if (e.getMessage() != null && e.getMessage().contains("C_REDIRECT_PK")) { + // MERGE statement race lost; try again. + txManager.rollback(status); + continue; } + throw e; + } catch (Exception e) { + txManager.rollback(status); + throw e; + } + txManager.commit(status); + break; } - - /** - * Remove a redirect for a specific key. - * - * @param key - * - * @return The redirect that was removed, or null - */ - public Redirect remove(String key) { - return redirectDao.remove(key); - } + } + + /** + * Remove a redirect for a specific key. 
+ * + * @param key + * + * @return The redirect that was removed, or null + */ + public Redirect remove(String key) { + return redirectDao.remove(key); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java index 40f07c22c..41d3a86bf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import com.imageworks.spcue.ServiceEntity; @@ -24,25 +20,24 @@ public interface ServiceManager { - public ServiceEntity getService(String id); + public ServiceEntity getService(String id); - public ServiceEntity getDefaultService(); + public ServiceEntity getDefaultService(); - public void createService(ServiceEntity s); + public void createService(ServiceEntity s); - void createService(ServiceOverrideEntity s); + void createService(ServiceOverrideEntity s); - void updateService(ServiceOverrideEntity s); + void updateService(ServiceOverrideEntity s); - void updateService(ServiceEntity s); + void updateService(ServiceEntity s); - void deleteService(ServiceOverrideEntity s); + void deleteService(ServiceOverrideEntity s); - void deleteService(ServiceEntity s); + void deleteService(ServiceEntity s); - ServiceOverrideEntity getServiceOverride(String id); + ServiceOverrideEntity getServiceOverride(String id); - ServiceEntity getService(String id, String show); + ServiceEntity getService(String id, String show); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java index 96715611f..6ab8de774 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import org.springframework.dao.EmptyResultDataAccessException; @@ -33,76 +29,73 @@ @Transactional public class ServiceManagerService implements ServiceManager { - private ServiceDao serviceDao; - - private static final String DEFAULT_SERVICE = "default"; - - @Override - public void createService(ServiceEntity s) { - serviceDao.insert(s); - } - - @Override - public void createService(ServiceOverrideEntity s) { - serviceDao.insert(s); - } - - @Override - public void deleteService(ServiceEntity s) { - serviceDao.delete(s); - } - - @Override - public void deleteService(ServiceOverrideEntity s) { - serviceDao.delete(s); - } - - - @Override - public void updateService(ServiceEntity s) { - serviceDao.update(s); - } - - @Override - public void updateService(ServiceOverrideEntity s) { - serviceDao.update(s); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ServiceEntity getService(String id, String show) { - try { - return serviceDao.getOverride(id, show); - } catch (EmptyResultDataAccessException e ) { - return serviceDao.get(id); - } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ServiceOverrideEntity getServiceOverride(String id) { - return serviceDao.getOverride(id); - } - - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ServiceEntity getService(String id) { - return serviceDao.get(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public ServiceEntity getDefaultService() { - return serviceDao.get(DEFAULT_SERVICE); - } - - public ServiceDao getServiceDao() { - return serviceDao; - } - - public void setServiceDao(ServiceDao serviceDao) { - this.serviceDao = serviceDao; + private ServiceDao serviceDao; + + private static final String DEFAULT_SERVICE = "default"; + + @Override + public void createService(ServiceEntity s) { + serviceDao.insert(s); + } + + @Override + public void createService(ServiceOverrideEntity s) { + serviceDao.insert(s); + } + + @Override + public void deleteService(ServiceEntity s) { + serviceDao.delete(s); + } + + @Override + public void deleteService(ServiceOverrideEntity s) { + serviceDao.delete(s); + } + + @Override + public void updateService(ServiceEntity s) { + serviceDao.update(s); + } + + @Override + public void updateService(ServiceOverrideEntity s) { + serviceDao.update(s); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceEntity getService(String id, String show) { + try { + return serviceDao.getOverride(id, show); + } catch (EmptyResultDataAccessException e) { + return serviceDao.get(id); } + } + + @Override + 
@Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceOverrideEntity getServiceOverride(String id) { + return serviceDao.getOverride(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceEntity getService(String id) { + return serviceDao.get(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceEntity getDefaultService() { + return serviceDao.get(DEFAULT_SERVICE); + } + + public ServiceDao getServiceDao() { + return serviceDao; + } + + public void setServiceDao(ServiceDao serviceDao) { + this.serviceDao = serviceDao; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java b/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java index 0f298c756..014b84cc5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java @@ -2,39 +2,34 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import com.imageworks.spcue.dao.NestedWhiteboardDao; import com.imageworks.spcue.dao.WhiteboardDao; /* - * The whiteboard service interface is identical to the WhiteboardDAO interface. - * Any service specific methods should be defined here. + * The whiteboard service interface is identical to the WhiteboardDAO interface. Any service + * specific methods should be defined here. */ public interface Whiteboard extends WhiteboardDao, NestedWhiteboardDao { - /** - * Returns true if the job is pending. - * - * @param name - * @return - */ - boolean isJobPending(String name); + /** + * Returns true if the job is pending. + * + * @param name + * @return + */ + boolean isJobPending(String name); } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java b/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java index 5fe08e1f6..3e7a3ea03 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.service; import java.util.List; @@ -95,436 +91,425 @@ import com.imageworks.spcue.grpc.task.Task; import com.imageworks.spcue.grpc.task.TaskSeq; - /** -* Traditionally the "Whiteboard" was an actually whiteboard the PSTs used to -* use to track jobs. Over time that term has come to mean an interface -* from which you can query cue data. The WhiteboardService defines -* all the methods from which clients can obtain data. All grpc servants -* that return something go through here. -* -* The whiteboard is a ready only transaction with a SERIALIZABLE transaction -* level. Moving the SERIALIZABLE actually makes the requests run faster -* because the readers view of the DB is fixed throughout the transaction. -* -*/ + * Traditionally the "Whiteboard" was an actually whiteboard the PSTs used to use to track jobs. + * Over time that term has come to mean an interface from which you can query cue data. The + * WhiteboardService defines all the methods from which clients can obtain data. All grpc servants + * that return something go through here. + * + * The whiteboard is a ready only transaction with a SERIALIZABLE transaction level. Moving the + * SERIALIZABLE actually makes the requests run faster because the readers view of the DB is fixed + * throughout the transaction. 
+ * + */ @Transactional(readOnly = true, propagation = Propagation.REQUIRED) public class WhiteboardService implements Whiteboard { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(WhiteboardService.class); + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(WhiteboardService.class); - private WhiteboardDao whiteboardDao; + private WhiteboardDao whiteboardDao; - private NestedWhiteboardDao nestedWhiteboardDao; + private NestedWhiteboardDao nestedWhiteboardDao; - private JobDao jobDao; + private JobDao jobDao; - public JobDao getJobDao() { - return jobDao; - } + public JobDao getJobDao() { + return jobDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public boolean isJobPending(String name) { - return jobDao.exists(name); - } + public boolean isJobPending(String name) { + return jobDao.exists(name); + } - public FilterSeq getFilters(ShowInterface show) { - return whiteboardDao.getFilters(show); - } + public FilterSeq getFilters(ShowInterface show) { + return whiteboardDao.getFilters(show); + } - public LayerSeq getLayers(JobInterface job) { - return whiteboardDao.getLayers(job); - } + public LayerSeq getLayers(JobInterface job) { + return whiteboardDao.getLayers(job); + } - public List getJobNames(JobSearchInterface r) { - return whiteboardDao.getJobNames(r); - } + public List getJobNames(JobSearchInterface r) { + return whiteboardDao.getJobNames(r); + } - public Job findJob(String name) { - return whiteboardDao.findJob(name); - } + public Job findJob(String name) { + return whiteboardDao.findJob(name); + } - public Job getJob(String id) { - return whiteboardDao.getJob(id); - } + public Job getJob(String id) { + return whiteboardDao.getJob(id); + } - public FrameSeq getFrames(FrameSearchInterface r) { - return this.whiteboardDao.getFrames(r); - } + public FrameSeq getFrames(FrameSearchInterface r) { + return this.whiteboardDao.getFrames(r); + } - public NestedHostSeq getHostWhiteboard() { - return nestedWhiteboardDao.getHostWhiteboard(); - } + public NestedHostSeq getHostWhiteboard() { + return nestedWhiteboardDao.getHostWhiteboard(); + } - public Show findShow(String name) { - return whiteboardDao.findShow(name); - } + public Show findShow(String name) { + return whiteboardDao.findShow(name); + } - public Show getShow(String id) { - return whiteboardDao.getShow(id); - } + public Show getShow(String id) { + return whiteboardDao.getShow(id); + } - public ShowSeq getShows() { - return whiteboardDao.getShows(); - } + public ShowSeq getShows() { + return whiteboardDao.getShows(); + } - public Subscription getSubscription(String id) { - return this.whiteboardDao.getSubscription(id); - } + public Subscription getSubscription(String id) { + return this.whiteboardDao.getSubscription(id); + } - public SubscriptionSeq getSubscriptions(ShowInterface show) { - return this.whiteboardDao.getSubscriptions(show); - } + public SubscriptionSeq getSubscriptions(ShowInterface show) { + return this.whiteboardDao.getSubscriptions(show); + } - public Allocation findAllocation(String name) { - return this.whiteboardDao.findAllocation(name); - } + public Allocation findAllocation(String name) { + return this.whiteboardDao.findAllocation(name); + } - public Allocation getAllocation(String id) { - return this.whiteboardDao.getAllocation(id); - } + public Allocation getAllocation(String id) { + return this.whiteboardDao.getAllocation(id); + } 
- public AllocationSeq getAllocations() { - return this.whiteboardDao.getAllocations(); - } + public AllocationSeq getAllocations() { + return this.whiteboardDao.getAllocations(); + } - public GroupSeq getGroups(ShowInterface show) { - return this.whiteboardDao.getGroups(show); - } + public GroupSeq getGroups(ShowInterface show) { + return this.whiteboardDao.getGroups(show); + } - public GroupSeq getGroups(GroupInterface group) { - return this.whiteboardDao.getGroups(group); - } + public GroupSeq getGroups(GroupInterface group) { + return this.whiteboardDao.getGroups(group); + } - public Group getGroup(String id) { - return this.whiteboardDao.getGroup(id); - } + public Group getGroup(String id) { + return this.whiteboardDao.getGroup(id); + } - public WhiteboardDao getWhiteboardDao() { - return whiteboardDao; - } + public WhiteboardDao getWhiteboardDao() { + return whiteboardDao; + } - public void setWhiteboardDao(WhiteboardDao whiteboardDao) { - this.whiteboardDao = whiteboardDao; - } + public void setWhiteboardDao(WhiteboardDao whiteboardDao) { + this.whiteboardDao = whiteboardDao; + } - public Action getAction(ActionInterface action) { - return whiteboardDao.getAction(action); - } + public Action getAction(ActionInterface action) { + return whiteboardDao.getAction(action); + } - public ActionSeq getActions(FilterInterface filter) { - return whiteboardDao.getActions(filter); - } + public ActionSeq getActions(FilterInterface filter) { + return whiteboardDao.getActions(filter); + } - public Matcher getMatcher(MatcherInterface matcher) { - return whiteboardDao.getMatcher(matcher); - } + public Matcher getMatcher(MatcherInterface matcher) { + return whiteboardDao.getMatcher(matcher); + } - public MatcherSeq getMatchers(FilterInterface filter) { - return whiteboardDao.getMatchers(filter); - } + public MatcherSeq getMatchers(FilterInterface filter) { + return whiteboardDao.getMatchers(filter); + } - public Filter getFilter(FilterInterface filter) { - return whiteboardDao.getFilter(filter); - } + public Filter getFilter(FilterInterface filter) { + return whiteboardDao.getFilter(filter); + } - public Filter findFilter(ShowInterface show, String name) { - return whiteboardDao.findFilter(show, name); - } + public Filter findFilter(ShowInterface show, String name) { + return whiteboardDao.findFilter(show, name); + } - public Group getRootGroup(ShowInterface show) { - return whiteboardDao.getRootGroup(show); - } + public Group getRootGroup(ShowInterface show) { + return whiteboardDao.getRootGroup(show); + } - public NestedGroup getJobWhiteboard(ShowInterface show) { - return nestedWhiteboardDao.getJobWhiteboard(show); - } + public NestedGroup getJobWhiteboard(ShowInterface show) { + return nestedWhiteboardDao.getJobWhiteboard(show); + } - public JobSeq getJobs(GroupInterface group) { - return whiteboardDao.getJobs(group); - } + public JobSeq getJobs(GroupInterface group) { + return whiteboardDao.getJobs(group); + } - public NestedWhiteboardDao getNestedWhiteboardDao() { - return nestedWhiteboardDao; - } + public NestedWhiteboardDao getNestedWhiteboardDao() { + return nestedWhiteboardDao; + } - public void setNestedWhiteboardDao(NestedWhiteboardDao nestedWhiteboardDao) { - this.nestedWhiteboardDao = nestedWhiteboardDao; - } + public void setNestedWhiteboardDao(NestedWhiteboardDao nestedWhiteboardDao) { + this.nestedWhiteboardDao = nestedWhiteboardDao; + } - public Depend getDepend(DependInterface depend) { - return whiteboardDao.getDepend(depend); - } - - public DependSeq 
getWhatDependsOnThis(JobInterface job) { - - return whiteboardDao.getWhatDependsOnThis(job); - } - - public DependSeq getWhatDependsOnThis(LayerInterface layer) { - return whiteboardDao.getWhatDependsOnThis(layer); - } - - public DependSeq getWhatDependsOnThis(FrameInterface frame) { - return whiteboardDao.getWhatDependsOnThis(frame); - } - - public DependSeq getWhatThisDependsOn(JobInterface job) { - return whiteboardDao.getWhatThisDependsOn(job); - } - - public DependSeq getWhatThisDependsOn(LayerInterface layer) { - return whiteboardDao.getWhatThisDependsOn(layer); - } - - public DependSeq getWhatThisDependsOn(FrameInterface frame) { - return whiteboardDao.getWhatThisDependsOn(frame); - } - - public DependSeq getDepends(JobInterface job) { - return whiteboardDao.getDepends(job); - } - - public Frame findFrame(String job, String layer, int frame) { - return whiteboardDao.findFrame(job, layer, frame); - } - - public Layer findLayer(String job, String layer) { - return whiteboardDao.findLayer(job, layer); - } - - public Host findHost(String name) { return whiteboardDao.findHost(name); - } - - public Depend getDepend(String id) { - return whiteboardDao.getDepend(id); - } - - public Group findGroup(String show, String group) { - return whiteboardDao.findGroup(show, group); - } - - public Filter findFilter(String show, String name) { - return whiteboardDao.findFilter(show, name); - } - - public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, - List layers, int epochTime) { - return whiteboardDao.getUpdatedFrames(job, layers, epochTime); - } - - public CommentSeq getComments(JobInterface j) { - return whiteboardDao.getComments(j); - } - - public CommentSeq getComments(HostInterface h) { - return whiteboardDao.getComments(h); - } - - public SubscriptionSeq getSubscriptions( - AllocationInterface alloc) { - return whiteboardDao.getSubscriptions(alloc); - } - - public Subscription findSubscription(String show, String alloc) { - return whiteboardDao.findSubscription(show, alloc); - } - - @Override - public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { - return whiteboardDao.getTask(show, dept, shot); - } - - @Override - public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { - return whiteboardDao.getTasks(show, dept); - } - - @Override - public List getDepartmentNames() { - return whiteboardDao.getDepartmentNames(); - } - - @Override - public Department getDepartment( - ShowInterface show, String name) { - return whiteboardDao.getDepartment(show, name); - } - - @Override - public DepartmentSeq getDepartments( - ShowInterface show) { - return whiteboardDao.getDepartments(show); - } - - @Override - public JobSeq getJobs(JobSearchInterface r) { - return whiteboardDao.getJobs(r); - } - - @Override - public Frame getFrame(String id) { - return whiteboardDao.getFrame(id); - } - - @Override - public Host getHost(String id) { - return whiteboardDao.getHost(id); - } - - @Override - public Layer getLayer(String id) { - return whiteboardDao.getLayer(id); - } - - @Override - public HostSeq getHosts(HostSearchInterface r) { - return whiteboardDao.getHosts(r); - } - - @Override - public ProcSeq getProcs(HostInterface h) { - return whiteboardDao.getProcs(h); - } - - @Override - public ProcSeq getProcs(ProcSearchInterface p) { - return whiteboardDao.getProcs(p); - } - - @Override - public Depend getDepend(AbstractDepend depend) { - return whiteboardDao.getDepend(depend); - } - - @Override - public Host getHost(DeedEntity deed) { - return 
whiteboardDao.getHost(deed); - } - - @Override - public Owner getOwner(DeedEntity deed) { - return whiteboardDao.getOwner(deed); - } - - @Override - public DeedSeq getDeeds( - OwnerEntity owner) { - return whiteboardDao.getDeeds(owner); - } - - @Override - public DeedSeq getDeeds( - ShowInterface show) { - return whiteboardDao.getDeeds(show); - } - - @Override - public HostSeq getHosts(OwnerEntity owner) { - return whiteboardDao.getHosts(owner); - } - - @Override - public Owner getOwner(HostInterface host) { - return whiteboardDao.getOwner(host); - } - - @Override - public List getOwners(ShowInterface show) { - return whiteboardDao.getOwners(show); - } - - @Override - public Owner getOwner(String name) { - return whiteboardDao.getOwner(name); - } - - @Override - public Deed getDeed(HostInterface host) { - return whiteboardDao.getDeed(host); - } - - @Override - public RenderPartition getRenderPartition(LocalHostAssignment l) { - return whiteboardDao.getRenderPartition(l); - } - - @Override - public RenderPartitionSeq getRenderPartitions( - HostInterface host) { - return whiteboardDao.getRenderPartitions(host); - } - - @Override - public FacilitySeq getFacilities() { - return whiteboardDao.getFacilities(); - } - - @Override - public Facility getFacility(String name) { - return whiteboardDao.getFacility(name); - } - - @Override - public AllocationSeq getAllocations( - com.imageworks.spcue.FacilityInterface facility) { - return whiteboardDao.getAllocations(facility); - } - - @Override - public ShowSeq getActiveShows() { - return whiteboardDao.getActiveShows(); - } - - @Override - public Service getService(String id) { - return whiteboardDao.getService(id); - } - - @Override - public ServiceSeq getDefaultServices() { - return whiteboardDao.getDefaultServices(); - } - - @Override - public Service findService(String name) { - return whiteboardDao.findService(name); - } - - @Override - public ServiceOverrideSeq getServiceOverrides( - ShowInterface show) { - return whiteboardDao.getServiceOverrides(show); - } - - @Override - public ServiceOverride getServiceOverride(ShowInterface show, - String name) { - return whiteboardDao.getServiceOverride(show, name); - } - - @Override - public Limit findLimit(String name) { - return whiteboardDao.findLimit(name); - } - - @Override - public Limit getLimit(String id) { - return whiteboardDao.getLimit(id); - } - - @Override - public List getLimits() { - return whiteboardDao.getLimits(); - } - - @Override - public List getLimits(LayerInterface layer) { - return whiteboardDao.getLimits(layer); - } -} + public Depend getDepend(DependInterface depend) { + return whiteboardDao.getDepend(depend); + } + public DependSeq getWhatDependsOnThis(JobInterface job) { + + return whiteboardDao.getWhatDependsOnThis(job); + } + + public DependSeq getWhatDependsOnThis(LayerInterface layer) { + return whiteboardDao.getWhatDependsOnThis(layer); + } + + public DependSeq getWhatDependsOnThis(FrameInterface frame) { + return whiteboardDao.getWhatDependsOnThis(frame); + } + + public DependSeq getWhatThisDependsOn(JobInterface job) { + return whiteboardDao.getWhatThisDependsOn(job); + } + + public DependSeq getWhatThisDependsOn(LayerInterface layer) { + return whiteboardDao.getWhatThisDependsOn(layer); + } + + public DependSeq getWhatThisDependsOn(FrameInterface frame) { + return whiteboardDao.getWhatThisDependsOn(frame); + } + + public DependSeq getDepends(JobInterface job) { + return whiteboardDao.getDepends(job); + } + + public Frame findFrame(String job, String layer, int frame) { + 
return whiteboardDao.findFrame(job, layer, frame); + } + + public Layer findLayer(String job, String layer) { + return whiteboardDao.findLayer(job, layer); + } + + public Host findHost(String name) { + return whiteboardDao.findHost(name); + } + + public Depend getDepend(String id) { + return whiteboardDao.getDepend(id); + } + + public Group findGroup(String show, String group) { + return whiteboardDao.findGroup(show, group); + } + + public Filter findFilter(String show, String name) { + return whiteboardDao.findFilter(show, name); + } + + public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, + int epochTime) { + return whiteboardDao.getUpdatedFrames(job, layers, epochTime); + } + + public CommentSeq getComments(JobInterface j) { + return whiteboardDao.getComments(j); + } + + public CommentSeq getComments(HostInterface h) { + return whiteboardDao.getComments(h); + } + + public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { + return whiteboardDao.getSubscriptions(alloc); + } + + public Subscription findSubscription(String show, String alloc) { + return whiteboardDao.findSubscription(show, alloc); + } + + @Override + public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { + return whiteboardDao.getTask(show, dept, shot); + } + + @Override + public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { + return whiteboardDao.getTasks(show, dept); + } + + @Override + public List getDepartmentNames() { + return whiteboardDao.getDepartmentNames(); + } + + @Override + public Department getDepartment(ShowInterface show, String name) { + return whiteboardDao.getDepartment(show, name); + } + + @Override + public DepartmentSeq getDepartments(ShowInterface show) { + return whiteboardDao.getDepartments(show); + } + + @Override + public JobSeq getJobs(JobSearchInterface r) { + return whiteboardDao.getJobs(r); + } + + @Override + public Frame getFrame(String id) { + return whiteboardDao.getFrame(id); + } + + @Override + public Host getHost(String id) { + return whiteboardDao.getHost(id); + } + + @Override + public Layer getLayer(String id) { + return whiteboardDao.getLayer(id); + } + + @Override + public HostSeq getHosts(HostSearchInterface r) { + return whiteboardDao.getHosts(r); + } + + @Override + public ProcSeq getProcs(HostInterface h) { + return whiteboardDao.getProcs(h); + } + + @Override + public ProcSeq getProcs(ProcSearchInterface p) { + return whiteboardDao.getProcs(p); + } + + @Override + public Depend getDepend(AbstractDepend depend) { + return whiteboardDao.getDepend(depend); + } + + @Override + public Host getHost(DeedEntity deed) { + return whiteboardDao.getHost(deed); + } + + @Override + public Owner getOwner(DeedEntity deed) { + return whiteboardDao.getOwner(deed); + } + + @Override + public DeedSeq getDeeds(OwnerEntity owner) { + return whiteboardDao.getDeeds(owner); + } + + @Override + public DeedSeq getDeeds(ShowInterface show) { + return whiteboardDao.getDeeds(show); + } + + @Override + public HostSeq getHosts(OwnerEntity owner) { + return whiteboardDao.getHosts(owner); + } + + @Override + public Owner getOwner(HostInterface host) { + return whiteboardDao.getOwner(host); + } + + @Override + public List getOwners(ShowInterface show) { + return whiteboardDao.getOwners(show); + } + + @Override + public Owner getOwner(String name) { + return whiteboardDao.getOwner(name); + } + + @Override + public Deed getDeed(HostInterface host) { + return whiteboardDao.getDeed(host); + } + + @Override + public RenderPartition 
getRenderPartition(LocalHostAssignment l) { + return whiteboardDao.getRenderPartition(l); + } + + @Override + public RenderPartitionSeq getRenderPartitions(HostInterface host) { + return whiteboardDao.getRenderPartitions(host); + } + + @Override + public FacilitySeq getFacilities() { + return whiteboardDao.getFacilities(); + } + + @Override + public Facility getFacility(String name) { + return whiteboardDao.getFacility(name); + } + + @Override + public AllocationSeq getAllocations(com.imageworks.spcue.FacilityInterface facility) { + return whiteboardDao.getAllocations(facility); + } + + @Override + public ShowSeq getActiveShows() { + return whiteboardDao.getActiveShows(); + } + + @Override + public Service getService(String id) { + return whiteboardDao.getService(id); + } + + @Override + public ServiceSeq getDefaultServices() { + return whiteboardDao.getDefaultServices(); + } + + @Override + public Service findService(String name) { + return whiteboardDao.findService(name); + } + + @Override + public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { + return whiteboardDao.getServiceOverrides(show); + } + + @Override + public ServiceOverride getServiceOverride(ShowInterface show, String name) { + return whiteboardDao.getServiceOverride(show, name); + } + + @Override + public Limit findLimit(String name) { + return whiteboardDao.findLimit(name); + } + + @Override + public Limit getLimit(String id) { + return whiteboardDao.getLimit(id); + } + + @Override + public List getLimits() { + return whiteboardDao.getLimits(); + } + + @Override + public List getLimits(LayerInterface layer) { + return whiteboardDao.getLimits(layer); + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java b/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java index 8aa7d61de..0a3e4a245 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.servlet; import com.imageworks.spcue.ShowEntity; @@ -43,111 +39,101 @@ @SuppressWarnings("serial") public class HealthCheckServlet extends FrameworkServlet { - private static final Logger logger = LogManager.getLogger(HealthCheckServlet.class); - private CueStatic cueStatic; - private Environment env; - - private enum HealthStatus { - SERVER_ERROR, - DISPATCH_QUEUE_UNHEALTHY, - MANAGE_QUEUE_UNHEALTHY, - REPORT_QUEUE_UNHEALTHY, - BOOKING_QUEUE_UNHEALTHY, - JOB_QUERY_ERROR - } - - @Override - public void initFrameworkServlet() throws ServletException { - this.cueStatic = (CueStatic) - Objects.requireNonNull(this.getWebApplicationContext()).getBean("cueStaticServant"); - this.env = (Environment) - Objects.requireNonNull(this.getWebApplicationContext()).getBean("environment"); - } - - private ArrayList getHealthStatus() { - ArrayList statusList = new ArrayList(); - - if (this.cueStatic == null) { - statusList.add(HealthStatus.SERVER_ERROR); - } - else { - // Check queue capacity - if (!this.cueStatic.isDispatchQueueHealthy()) { - statusList.add(HealthStatus.DISPATCH_QUEUE_UNHEALTHY); - } - if (!this.cueStatic.isManageQueueHealthy()) { - statusList.add(HealthStatus.MANAGE_QUEUE_UNHEALTHY); - } - if (!this.cueStatic.isReportQueueHealthy()) { - statusList.add(HealthStatus.REPORT_QUEUE_UNHEALTHY); - } - if (!this.cueStatic.isBookingQueueHealthy()) { - statusList.add(HealthStatus.BOOKING_QUEUE_UNHEALTHY); - } - // Run get jobs, if it crashes, set error, if it takes longer than expected, - // the caller (HEALTHCHECK) will timeout - try { - getJobs(); - } catch (RuntimeException re) { - Sentry.captureException(re); - statusList.add(HealthStatus.JOB_QUERY_ERROR); - } - } - return statusList; + private static final Logger logger = LogManager.getLogger(HealthCheckServlet.class); + private CueStatic cueStatic; + private Environment env; + + private enum HealthStatus { + SERVER_ERROR, DISPATCH_QUEUE_UNHEALTHY, MANAGE_QUEUE_UNHEALTHY, REPORT_QUEUE_UNHEALTHY, BOOKING_QUEUE_UNHEALTHY, JOB_QUERY_ERROR + } + + @Override + public void initFrameworkServlet() throws ServletException { + this.cueStatic = (CueStatic) Objects.requireNonNull(this.getWebApplicationContext()) + .getBean("cueStaticServant"); + this.env = (Environment) Objects.requireNonNull(this.getWebApplicationContext()) + .getBean("environment"); + } + + private ArrayList getHealthStatus() { + ArrayList statusList = new ArrayList(); + + if (this.cueStatic == null) { + statusList.add(HealthStatus.SERVER_ERROR); + } else { + // Check queue capacity + if (!this.cueStatic.isDispatchQueueHealthy()) { + statusList.add(HealthStatus.DISPATCH_QUEUE_UNHEALTHY); + } + if (!this.cueStatic.isManageQueueHealthy()) { + statusList.add(HealthStatus.MANAGE_QUEUE_UNHEALTHY); + } + if (!this.cueStatic.isReportQueueHealthy()) { + statusList.add(HealthStatus.REPORT_QUEUE_UNHEALTHY); + } + if (!this.cueStatic.isBookingQueueHealthy()) { + statusList.add(HealthStatus.BOOKING_QUEUE_UNHEALTHY); + } + // Run get jobs, if it crashes, set error, if it takes longer than expected, + // the caller (HEALTHCHECK) will timeout + try { + getJobs(); + } catch (RuntimeException re) { + Sentry.captureException(re); + statusList.add(HealthStatus.JOB_QUERY_ERROR); + } } - - private void getJobs() { - if (this.cueStatic != null && this.env != null) { - // Defaults to testing show, which is added as part of the seeding data script - String defaultShow = env.getProperty("protected_shows", - String.class, "testing").split(",")[0]; - ShowEntity s = new 
ShowEntity(); - s.name = defaultShow; - JobSearchInterface js = new JobSearch(); - js.filterByShow(s); - - // GetJobs will throw an exception if there's a problem getting - // data from the database - JobSeq jobs = this.cueStatic.getWhiteboard().getJobs(js); - } + return statusList; + } + + private void getJobs() { + if (this.cueStatic != null && this.env != null) { + // Defaults to testing show, which is added as part of the seeding data script + String defaultShow = + env.getProperty("protected_shows", String.class, "testing").split(",")[0]; + ShowEntity s = new ShowEntity(); + s.name = defaultShow; + JobSearchInterface js = new JobSearch(); + js.filterByShow(s); + + // GetJobs will throw an exception if there's a problem getting + // data from the database + JobSeq jobs = this.cueStatic.getWhiteboard().getJobs(js); } - - @Override - protected void doService(HttpServletRequest request, - HttpServletResponse response) throws Exception { - logger.info("HealthCheckServlet: Received request"); - try { - ArrayList statusList = getHealthStatus(); - if (!statusList.isEmpty()) { - response.setStatus(500); - StringBuilder out = new StringBuilder("FAILED: "); - for(HealthStatus status : statusList) { - out.append(status.name()); - out.append(" "); - } - Sentry.captureMessage("Healthcheck failure: " + out); - - sendResponse(response, out.toString()); - } - else - { - sendResponse(response, "SUCCESS"); - } - } - catch (Exception e) { - logger.error("Unexpected error", e); - response.setStatus(500); - sendResponse(response, "FAILED " + e.getMessage()); + } + + @Override + protected void doService(HttpServletRequest request, HttpServletResponse response) + throws Exception { + logger.info("HealthCheckServlet: Received request"); + try { + ArrayList statusList = getHealthStatus(); + if (!statusList.isEmpty()) { + response.setStatus(500); + StringBuilder out = new StringBuilder("FAILED: "); + for (HealthStatus status : statusList) { + out.append(status.name()); + out.append(" "); } + Sentry.captureMessage("Healthcheck failure: " + out); + + sendResponse(response, out.toString()); + } else { + sendResponse(response, "SUCCESS"); + } + } catch (Exception e) { + logger.error("Unexpected error", e); + response.setStatus(500); + sendResponse(response, "FAILED " + e.getMessage()); } - - private void sendResponse(HttpServletResponse response, String message) { - response.setContentLength(message.length()); - try { - response.getOutputStream().println(message); - } catch (IOException e) { - // failed to send response, just eat it. - } + } + + private void sendResponse(HttpServletResponse response, String message) { + response.setContentLength(message.length()); + try { + response.getOutputStream().println(message); + } catch (IOException e) { + // failed to send response, just eat it. } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java b/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java index 76040b7bd..5df479fd7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.servlet; import java.io.IOException; @@ -34,51 +30,47 @@ import com.imageworks.spcue.service.JobSpec; /** - * JobLaunchServlet accepts the Job spec XML via POST method and - * queues it for launch. + * JobLaunchServlet accepts the Job spec XML via POST method and queues it for launch. */ @SuppressWarnings("serial") public class JobLaunchServlet extends FrameworkServlet { - private static final Logger logger = LogManager.getLogger(JobLaunchServlet.class); - - private JobLauncher jobLauncher; - - @Override - public void initFrameworkServlet() throws ServletException { - jobLauncher = (JobLauncher) - Objects.requireNonNull(this.getWebApplicationContext()).getBean("jobLauncher"); - } - - @Override - protected void doService(HttpServletRequest request, - HttpServletResponse response) throws Exception { - - try { - JobSpec spec = jobLauncher.parse( - request.getParameter("payload")); - jobLauncher.queueAndLaunch(spec); - - StringBuilder sb = new StringBuilder(4096); - for (BuildableJob job: spec.getJobs()) { - sb.append(job.detail.name); - sb.append(","); - } - sendResponse(response,"SUCCESS " + sb.toString()); - } - catch (Exception e) { - logger.debug("Misc error", e); - sendResponse(response, "FAILED " + e.getMessage()); - } + private static final Logger logger = LogManager.getLogger(JobLaunchServlet.class); + + private JobLauncher jobLauncher; + + @Override + public void initFrameworkServlet() throws ServletException { + jobLauncher = (JobLauncher) Objects.requireNonNull(this.getWebApplicationContext()) + .getBean("jobLauncher"); + } + + @Override + protected void doService(HttpServletRequest request, HttpServletResponse response) + throws Exception { + + try { + JobSpec spec = jobLauncher.parse(request.getParameter("payload")); + jobLauncher.queueAndLaunch(spec); + + StringBuilder sb = new StringBuilder(4096); + for (BuildableJob job : spec.getJobs()) { + sb.append(job.detail.name); + sb.append(","); + } + sendResponse(response, "SUCCESS " + sb.toString()); + } catch (Exception e) { + logger.debug("Misc error", e); + sendResponse(response, "FAILED " + e.getMessage()); } - - private void sendResponse(HttpServletResponse response, String message) { - response.setContentLength(message.length()); - try { - response.getOutputStream().println(message); - } catch (IOException e) { - // failed to send response, just eat it. 
- } + } + + private void sendResponse(HttpServletResponse response, String message) { + response.setContentLength(message.length()); + try { + response.getOutputStream().println(message); + } catch (IOException e) { + // failed to send response, just eat it. } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java b/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java index ec1cc4e2c..89a5c72c8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.util; import java.math.BigDecimal; @@ -29,38 +25,45 @@ */ public final class Convert { - public static final int coresToCoreUnits(float cores) { - return new BigDecimal(cores * 100).setScale(2, RoundingMode.HALF_UP).intValue(); - } + public static final int coresToCoreUnits(float cores) { + return new BigDecimal(cores * 100).setScale(2, RoundingMode.HALF_UP).intValue(); + } - public static final int coresToCoreUnits(int cores) { - return cores * 100; - } + public static final int coresToCoreUnits(int cores) { + return cores * 100; + } - public static final int coresToWholeCoreUnits(float cores) { - if (cores == -1) { return -1;} - return (int)(((cores * 100.0f) + 0.5f) / 100) * 100; + public static final int coresToWholeCoreUnits(float cores) { + if (cores == -1) { + return -1; } + return (int) (((cores * 100.0f) + 0.5f) / 100) * 100; + } - public static final float coreUnitsToCores(int coreUnits) { - if (coreUnits == -1) { return -1f;} - return Float.valueOf(String.format(Locale.ROOT, "%6.2f", coreUnits / 100.0f)); + public static final float coreUnitsToCores(int coreUnits) { + if (coreUnits == -1) { + return -1f; } + return Float.valueOf(String.format(Locale.ROOT, "%6.2f", coreUnits / 100.0f)); + } - public static final float coreUnitsToWholeCores(int coreUnits) { - if (coreUnits == -1) { return -1f;} - return Float.valueOf((int) ((coreUnits / 100.0f) + 0.5)); + public static final float coreUnitsToWholeCores(int coreUnits) { + if (coreUnits == -1) { + return -1f; } + return Float.valueOf((int) ((coreUnits / 100.0f) + 0.5)); + } - private static final List MATCH_BOOL = - java.util.Arrays.asList(new String[] { "true", "yes", "1", "on" }); + private static final List MATCH_BOOL = + java.util.Arrays.asList(new String[] {"true", "yes", "1", 
"on"}); - public static final boolean stringToBool(String value) { - if (value == null) { return false; } - if (MATCH_BOOL.contains(value.toLowerCase())) { - return true; - } - return false; + public static final boolean stringToBool(String value) { + if (value == null) { + return false; + } + if (MATCH_BOOL.contains(value.toLowerCase())) { + return true; } + return false; + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java index 3879914b1..97f20c061 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.util; import java.io.PrintWriter; @@ -32,33 +28,32 @@ */ public class CueExceptionUtil { - /** - * returns the stack track for an exception as a string. - * - * @param aThrowable - * @return String - */ - public static String getStackTrace(Throwable aThrowable) { - final Writer result = new StringWriter(); - final PrintWriter printWriter = new PrintWriter(result); - aThrowable.printStackTrace(printWriter); - return result.toString(); - } - - /** - * Creates an error message string which w/ a stack track and returns it. - * - * @param msg - * @param aThrowable - * @return String - */ - public static void logStackTrace(String msg, Throwable aThrowable) { - Logger error_logger = LogManager.getLogger(CueExceptionUtil.class); - error_logger.info("Caught unexpected exception caused by: " + aThrowable); - error_logger.info("StackTrace: \n" + getStackTrace(aThrowable)); - if (aThrowable.getCause() != null) { - error_logger.info("Caused By: " + getStackTrace(aThrowable.getCause())); - } + /** + * returns the stack track for an exception as a string. + * + * @param aThrowable + * @return String + */ + public static String getStackTrace(Throwable aThrowable) { + final Writer result = new StringWriter(); + final PrintWriter printWriter = new PrintWriter(result); + aThrowable.printStackTrace(printWriter); + return result.toString(); + } + + /** + * Creates an error message string which w/ a stack track and returns it. 
+ * + * @param msg + * @param aThrowable + * @return String + */ + public static void logStackTrace(String msg, Throwable aThrowable) { + Logger error_logger = LogManager.getLogger(CueExceptionUtil.class); + error_logger.info("Caught unexpected exception caused by: " + aThrowable); + error_logger.info("StackTrace: \n" + getStackTrace(aThrowable)); + if (aThrowable.getCause() != null) { + error_logger.info("Caused By: " + getStackTrace(aThrowable.getCause())); } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java index 20e91147f..a94a96aec 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.util; import java.io.File; @@ -57,340 +53,331 @@ import com.imageworks.spcue.SpcueRuntimeException; import com.imageworks.spcue.dispatcher.Dispatcher; - /** * CueUtil is set of common methods used throughout the application. */ @Component public final class CueUtil { - private static final Logger logger = LogManager.getLogger(CueUtil.class); - private static String smtpHost = ""; - @Autowired - private Environment env; - - /** - * Commonly used macros for gigabyte values in KB. - */ - public static final long MB128 = 131072; - public static final long MB256 = 262144; - public static final long MB512 = 524288; - public static final long GB = 1048576; - public static final long GB2 = 1048576L * 2; - public static final long GB4 = 1048576L * 4; - public static final long GB8 = 1048576L * 8; - public static final long GB16 = 1048576L * 16; - public static final long GB32 = 1048576L * 32; - - /** - * Features that relay on an integer greated than 0 to work - * properly are disabled by setting them to -1. - */ - public static final int FEATURE_DISABLED = -1; - - /** - * A const to repesent a single core - */ - public static final int ONE_CORE = 100; - - /** - * One hour of time in seconds. 
- */ - public static final int ONE_HOUR = 3600; - - @PostConstruct - public void init() { - CueUtil.smtpHost = this.env.getRequiredProperty("smtp_host", String.class); + private static final Logger logger = LogManager.getLogger(CueUtil.class); + private static String smtpHost = ""; + @Autowired + private Environment env; + + /** + * Commonly used macros for gigabyte values in KB. + */ + public static final long MB128 = 131072; + public static final long MB256 = 262144; + public static final long MB512 = 524288; + public static final long GB = 1048576; + public static final long GB2 = 1048576L * 2; + public static final long GB4 = 1048576L * 4; + public static final long GB8 = 1048576L * 8; + public static final long GB16 = 1048576L * 16; + public static final long GB32 = 1048576L * 32; + + /** + * Features that relay on an integer greated than 0 to work properly are disabled by setting them + * to -1. + */ + public static final int FEATURE_DISABLED = -1; + + /** + * A const to repesent a single core + */ + public static final int ONE_CORE = 100; + + /** + * One hour of time in seconds. + */ + public static final int ONE_HOUR = 3600; + + @PostConstruct + public void init() { + CueUtil.smtpHost = this.env.getRequiredProperty("smtp_host", String.class); + } + + /** + * Return true if the given name is formatted as a valid allocation name. Allocation names should + * be facility.unique_name. + * + * @param name + * @return + */ + public static boolean verifyAllocationNameFormat(String name) { + return Pattern.matches("^(\\w+)\\.(\\w+)$", name); + } + + /** + * Split an allocation name and return its parts in a String array. The first element is the + * facility, the second is the allocation's unique name. + * + * @param name + * @return + */ + public static String[] splitAllocationName(String name) { + String[] parts = name.split("\\.", 2); + if (parts.length != 2 || !verifyAllocationNameFormat(name)) { + throw new SpcueRuntimeException( + "Allocation names must be in the form of facility.alloc. The name " + name + + " is not valid."); } - - /** - * Return true if the given name is formatted as a valid - * allocation name. Allocation names should be facility.unique_name. - * - * @param name - * @return - */ - public static boolean verifyAllocationNameFormat(String name) { - return Pattern.matches("^(\\w+)\\.(\\w+)$", name); - } - - /** - * Split an allocation name and return its parts in a - * String array. The first element is the facility, the second - * is the allocation's unique name. - * - * @param name - * @return - */ - public static String[] splitAllocationName(String name) { - String[] parts = name.split("\\.", 2); - if (parts.length != 2 || !verifyAllocationNameFormat(name)) { - throw new SpcueRuntimeException( - "Allocation names must be in the form of facility.alloc. The name " + - name + " is not valid."); + return parts; + } + + /** + * Finds the chunk that the dependErFrame belongs to in the given sequence of frames. + * + * @param dependOnFrames - the full frame range to depend on + * @param dependErFrame - the dependent frame number. 
+ * @return + */ + public static int findChunk(List dependOnFrames, int dependErFrame) { + int dependOnFrame = -1; + if (dependOnFrames.contains(dependErFrame)) { + dependOnFrame = dependErFrame; + } else { + int size = dependOnFrames.size(); + for (int i = 0; i < size; i++) { + dependOnFrame = dependOnFrames.get(i); + if (dependOnFrame > dependErFrame) { + dependOnFrame = dependOnFrames.get(i - 1); + break; } - return parts; + } } - /** - * Finds the chunk that the dependErFrame belongs to in the - * given sequence of frames. - * - * @param dependOnFrames - the full frame range to depend on - * @param dependErFrame - the dependent frame number. - * @return - */ - public static int findChunk(List dependOnFrames, int dependErFrame) { - int dependOnFrame = -1; - if (dependOnFrames.contains(dependErFrame)) { - dependOnFrame = dependErFrame; - } else { - int size = dependOnFrames.size(); - for (int i=0; i < size; i++) { - dependOnFrame = dependOnFrames.get(i); - if (dependOnFrame > dependErFrame) { - dependOnFrame = dependOnFrames.get(i-1); - break; - } - } - } - if (dependOnFrame == -1) { - throw new RuntimeException("unable to find chunk for frame: " + dependErFrame + - " in the range: " + dependOnFrames.toString()); - } - return dependOnFrame; + if (dependOnFrame == -1) { + throw new RuntimeException("unable to find chunk for frame: " + dependErFrame + + " in the range: " + dependOnFrames.toString()); } - /** - * A simple send mail method - * - * @param to - * @param from - * @param subject - * @param body - * @param images - */ - public static void sendmail(String to, String from, String subject, StringBuilder body, Map images, File attachment) { - try { - Properties props = System.getProperties(); - props.put("mail.smtp.host", CueUtil.smtpHost); - Session session = Session.getDefaultInstance(props, null); - Message msg = new MimeMessage(session); - msg.setFrom(new InternetAddress(from)); - msg.setReplyTo(new InternetAddress[] { new InternetAddress(from) } ); - msg.setRecipients(Message.RecipientType.TO, - InternetAddress.parse(to, false)); - msg.setSubject(subject); - - MimeMultipart mimeMultipart = new MimeMultipart(); - mimeMultipart.setSubType("alternative"); - - BodyPart htmlBodyPart = new MimeBodyPart(); - htmlBodyPart.setContent(body.toString(), "text/html"); - mimeMultipart.addBodyPart(htmlBodyPart); - - for (Entry e : images.entrySet()) { - String name = e.getKey().replace('/', '_'); - - BodyPart imageBodyPart = new MimeBodyPart(); - DataSource ds = new ByteArrayDataSource(e.getValue(), "image/png"); - DataHandler dh = new DataHandler(ds); - imageBodyPart.setDataHandler(dh); - imageBodyPart.setFileName(name); - imageBodyPart.setDisposition("inline"); - imageBodyPart.setHeader("Content-ID", '<' + name + '>'); - mimeMultipart.addBodyPart(imageBodyPart); - } - if (attachment != null && attachment.length() != 0){ - MimeBodyPart attachmentPart = new MimeBodyPart(); - attachmentPart.attachFile(attachment); - mimeMultipart.addBodyPart(attachmentPart); - } - - msg.setContent(mimeMultipart); - msg.setHeader("X-Mailer", "OpenCueMailer"); - msg.setSentDate(new Date()); - Transport transport = session.getTransport("smtp"); - transport.connect(CueUtil.smtpHost, null, null); - Transport.send(msg); - } - catch (Exception e) { - throw new RuntimeException("failed to send email: " + e); - } + return dependOnFrame; + } + + /** + * A simple send mail method + * + * @param to + * @param from + * @param subject + * @param body + * @param images + */ + public static void sendmail(String to, String 
from, String subject, StringBuilder body, + Map images, File attachment) { + try { + Properties props = System.getProperties(); + props.put("mail.smtp.host", CueUtil.smtpHost); + Session session = Session.getDefaultInstance(props, null); + Message msg = new MimeMessage(session); + msg.setFrom(new InternetAddress(from)); + msg.setReplyTo(new InternetAddress[] {new InternetAddress(from)}); + msg.setRecipients(Message.RecipientType.TO, InternetAddress.parse(to, false)); + msg.setSubject(subject); + + MimeMultipart mimeMultipart = new MimeMultipart(); + mimeMultipart.setSubType("alternative"); + + BodyPart htmlBodyPart = new MimeBodyPart(); + htmlBodyPart.setContent(body.toString(), "text/html"); + mimeMultipart.addBodyPart(htmlBodyPart); + + for (Entry e : images.entrySet()) { + String name = e.getKey().replace('/', '_'); + + BodyPart imageBodyPart = new MimeBodyPart(); + DataSource ds = new ByteArrayDataSource(e.getValue(), "image/png"); + DataHandler dh = new DataHandler(ds); + imageBodyPart.setDataHandler(dh); + imageBodyPart.setFileName(name); + imageBodyPart.setDisposition("inline"); + imageBodyPart.setHeader("Content-ID", '<' + name + '>'); + mimeMultipart.addBodyPart(imageBodyPart); + } + if (attachment != null && attachment.length() != 0) { + MimeBodyPart attachmentPart = new MimeBodyPart(); + attachmentPart.attachFile(attachment); + mimeMultipart.addBodyPart(attachmentPart); + } + + msg.setContent(mimeMultipart); + msg.setHeader("X-Mailer", "OpenCueMailer"); + msg.setSentDate(new Date()); + Transport transport = session.getTransport("smtp"); + transport.connect(CueUtil.smtpHost, null, null); + Transport.send(msg); + } catch (Exception e) { + throw new RuntimeException("failed to send email: " + e); } - - public static final String formatDuration(long seconds) { - return String.format("%02d:%02d:%02d",seconds / 3600,(seconds % 3600) / 60,seconds % 60); - } - - public static final String formatDuration(int seconds) { - return String.format("%02d:%02d:%02d",seconds / 3600,(seconds % 3600) / 60,seconds % 60); + } + + public static final String formatDuration(long seconds) { + return String.format("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60); + } + + public static final String formatDuration(int seconds) { + return String.format("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60); + } + + public static final String KbToMb(long kb) { + return String.format("%dMB", kb / 1024); + } + + public static final long convertKbToFakeKb64bit(Environment env, long Kb) { + long memReservedSystem = + env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); + return (long) (Math.ceil((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; + } + + public static final long convertKbToFakeKb32bit(Environment env, long Kb) { + long memReservedSystem = + env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); + return (long) (Math.floor((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; + } + + /** + * returns epoch time + * + * @return int + */ + public static int getTime() { + return (int) (System.currentTimeMillis() / 1000); + } + + /** + * returns a frame name from a layer and frame number. 
+ * + * @param layer + * @param num + * @return String + */ + public final static String buildFrameName(LayerInterface layer, int num) { + return String.format("%04d-%s", num, layer.getName()); + } + + public final static String buildProcName(String host, int cores, int gpus) { + return String.format(Locale.ROOT, "%s/%4.2f/%d", host, Convert.coreUnitsToCores(cores), gpus); + } + + /** + * for logging how long an operation took + * + * @param time + * @param message + */ + public final static void logDuration(long time, String message) { + long duration = System.currentTimeMillis() - time; + logger.info("Operation: " + message + " took " + duration + "ms"); + } + + /** + * return the milliseconds since time + * + * @param time + */ + public final static long duration(long time) { + return System.currentTimeMillis() - time; + } + + public static final long getCpuUsage() { + ThreadMXBean mx = ManagementFactory.getThreadMXBean(); + mx.setThreadCpuTimeEnabled(true); + long result = 0; + for (long id : mx.getAllThreadIds()) { + result = result + mx.getThreadUserTime(id); } + return result; + } - public static final String KbToMb(long kb) { - return String.format("%dMB", kb / 1024); - } + private static final int DAY_START = 7; + private static final int DAY_END = 19; - public static final long convertKbToFakeKb64bit(Environment env, long Kb) { - long memReservedSystem = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_system", - Long.class); - return (long) (Math.ceil((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; - } - - public static final long convertKbToFakeKb32bit(Environment env, long Kb) { - long memReservedSystem = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_system", - Long.class); - return (long) (Math.floor((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; + public static boolean isDayTime() { + Calendar cal = Calendar.getInstance(); + int hour_of_day = cal.get(Calendar.HOUR_OF_DAY); + if (hour_of_day >= DAY_START && hour_of_day < DAY_END) { + return true; } + return false; + } + + /** + * Take a frame range and chunk size and return an ordered array of frames with all duplicates + * removed. + * + * @param range + * @param chunkSize + * @return + */ + public static List normalizeFrameRange(String range, int chunkSize) { + return normalizeFrameRange(new FrameSet(range), chunkSize); + } + + /** + * Take a frame range and chunk size and return an ordered array of frames with all duplicates + * removed. + * + * @param frameSet + * @param chunkSize + * @return + */ + public static List normalizeFrameRange(FrameSet frameSet, int chunkSize) { + + int rangeSize = frameSet.size(); + Set result = new LinkedHashSet(rangeSize / chunkSize); /** - * returns epoch time - * - * @return int + * Have to remove all duplicates and maintain order before chunking it. */ - public static int getTime() { - return (int) (System.currentTimeMillis() / 1000); - } + if (chunkSize > 1) { - /** - * returns a frame name from a layer and frame number. - * - * @param layer - * @param num - * @return String - */ - public final static String buildFrameName(LayerInterface layer, int num) { - return String.format("%04d-%s", num, layer.getName()); - } - - public final static String buildProcName(String host, int cores, int gpus) { - return String.format(Locale.ROOT, "%s/%4.2f/%d", host, Convert.coreUnitsToCores(cores), gpus); - } + /** + * This handles people who chunk on 1,000,000. 
+ */ + if (chunkSize > rangeSize) { + result.add(frameSet.get(0)); + } else { - /** - * for logging how long an operation took - * - * @param time - * @param message - */ - public final static void logDuration(long time, String message) { - long duration = System.currentTimeMillis() - time; - logger.info("Operation: " + message + " took " + duration + "ms"); - } - - /** - * return the milliseconds since time - * - * @param time - */ - public final static long duration(long time) { - return System.currentTimeMillis() - time; - } - - public static final long getCpuUsage() { - ThreadMXBean mx = ManagementFactory.getThreadMXBean(); - mx.setThreadCpuTimeEnabled(true); - long result = 0; - for (long id: mx.getAllThreadIds()) { - result = result + mx.getThreadUserTime(id); - } - return result; - } + /** + * A linked hash set to weed out duplicates but maintain frame ordering. + */ + final Set tempResult = new LinkedHashSet((rangeSize / chunkSize) + 1); - private static final int DAY_START = 7; - private static final int DAY_END = 19; - public static boolean isDayTime() { - Calendar cal = Calendar.getInstance(); - int hour_of_day = cal.get(Calendar.HOUR_OF_DAY); - if (hour_of_day >= DAY_START && hour_of_day < DAY_END) { - return true; + for (int idx = 0; idx < rangeSize; idx = idx + 1) { + tempResult.add(frameSet.get(idx)); } - return false; - } - - /** - * Take a frame range and chunk size and return an - * ordered array of frames with all duplicates removed. - * - * @param range - * @param chunkSize - * @return - */ - public static List normalizeFrameRange(String range, int chunkSize) { - return normalizeFrameRange(new FrameSet(range), chunkSize); - } - - /** - * Take a frame range and chunk size and return an - * ordered array of frames with all duplicates removed. - * - * @param frameSet - * @param chunkSize - * @return - */ - public static List normalizeFrameRange(FrameSet frameSet, int chunkSize) { - - int rangeSize = frameSet.size(); - Set result = new LinkedHashSet(rangeSize / chunkSize); /** - * Have to remove all duplicates and maintain order before chunking it. + * Now go through the frames and add 1 frame for every chunk. */ - if (chunkSize > 1) { - - /** - * This handles people who chunk on 1,000,000. - */ - if (chunkSize > rangeSize) { - result.add(frameSet.get(0)); - } - else { - - /** - * A linked hash set to weed out duplicates - * but maintain frame ordering. - */ - final Set tempResult = - new LinkedHashSet((rangeSize / chunkSize) + 1); - - for (int idx = 0; idx < rangeSize; idx = idx + 1) { - tempResult.add(frameSet.get(idx)); - } - - /** - * Now go through the frames and add 1 frame - * for every chunk. - */ - int idx = 0; - for (int frame: tempResult) { - if (idx % chunkSize == 0) { - result.add(frame); - } - idx = idx + 1; - } - } - } - else { - for (int idx = 0; idx < rangeSize; idx = idx + 1) { - result.add(frameSet.get(idx)); - } + int idx = 0; + for (int frame : tempResult) { + if (idx % chunkSize == 0) { + result.add(frame); + } + idx = idx + 1; } - - return Collections.unmodifiableList( - new ArrayList(result)); + } + } else { + for (int idx = 0; idx < rangeSize; idx = idx + 1) { + result.add(frameSet.get(idx)); + } } - /** - * Get "{prefix}.{key}" property int value - * - * @param env - * @param prefix Example "dispatcher.report_queue" - * @param key Example "core_pool_size" - */ - public static int getIntProperty(Environment env, String prefix, String key) - throws IllegalStateException { - Integer value = env.getRequiredProperty(prefix + "." 
+ key, Integer.class); - return value.intValue(); - } + return Collections.unmodifiableList(new ArrayList(result)); + } + + /** + * Get "{prefix}.{key}" property int value + * + * @param env + * @param prefix Example "dispatcher.report_queue" + * @param key Example "core_pool_size" + */ + public static int getIntProperty(Environment env, String prefix, String key) + throws IllegalStateException { + Integer value = env.getRequiredProperty(prefix + "." + key, Integer.class); + return value.intValue(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java b/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java index 2e601f72c..e00485a57 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java @@ -20,174 +20,170 @@ */ public class FrameRange { - private static final Pattern SINGLE_FRAME_PATTERN = Pattern.compile("(-?)\\d+"); - private static final Pattern SIMPLE_FRAME_RANGE_PATTERN = Pattern.compile( - "(?(-?)\\d+)-(?(-?)\\d+)"); - private static final Pattern STEP_PATTERN = Pattern.compile( - "(?(-?)\\d+)-(?(-?)\\d+)(?[xy])(?(-?)\\d+)"); - private static final Pattern INTERLEAVE_PATTERN = Pattern.compile( - "(?(-?)\\d+)-(?(-?)\\d+):(?(-?)\\d+)"); - - private ImmutableList frameList; - - /** - * Construct a FrameRange object by parsing a spec. - * - * FrameSet("1-10x3"); - * FrameSet("1-10y3"); // inverted step - * FrameSet("10-1x-1"); - * FrameSet("1"); // same as "1-1x1" - * FrameSet("1-10:5"); // interleave of 5 - * - * A valid spec consists of: - * - * An inTime. - * An optional hyphen and outTime. - * An optional x or y and stepSize. - * Or an optional : and interleaveSize. - * If outTime is less than inTime, stepSize must be negative. - * - * A stepSize of 0 produces an empty FrameRange. - * - * A stepSize cannot be combined with a interleaveSize. - * - * A stepSize designated with y creates an inverted step. Frames that would be included - * with an x step are excluded. - * - * Example: 1-10y3 == 2, 3, 5, 6, 8, 9. - * - * An interleaveSize alters the order of frames when iterating over the FrameRange. The - * iterator will first produce the list of frames from inTime to outTime with a stepSize - * equal to interleaveSize. The interleaveSize is then divided in half, producing another - * set of frames unique from the first set. This process is repeated until interleaveSize - * reaches 1. - * - * Example: 1-10:5 == 1, 6, 3, 5 ,7 ,9, 2, 4, 8, 10. - */ - public FrameRange(String frameRange) { - frameList = parseFrameRange(frameRange); + private static final Pattern SINGLE_FRAME_PATTERN = Pattern.compile("(-?)\\d+"); + private static final Pattern SIMPLE_FRAME_RANGE_PATTERN = + Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+)"); + private static final Pattern STEP_PATTERN = + Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+)(?[xy])(?(-?)\\d+)"); + private static final Pattern INTERLEAVE_PATTERN = + Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+):(?(-?)\\d+)"); + + private ImmutableList frameList; + + /** + * Construct a FrameRange object by parsing a spec. + * + * FrameSet("1-10x3"); FrameSet("1-10y3"); // inverted step FrameSet("10-1x-1"); FrameSet("1"); // + * same as "1-1x1" FrameSet("1-10:5"); // interleave of 5 + * + * A valid spec consists of: + * + * An inTime. An optional hyphen and outTime. An optional x or y and stepSize. Or an optional : + * and interleaveSize. If outTime is less than inTime, stepSize must be negative. + * + * A stepSize of 0 produces an empty FrameRange. 
+ * + * A stepSize cannot be combined with a interleaveSize. + * + * A stepSize designated with y creates an inverted step. Frames that would be included with an x + * step are excluded. + * + * Example: 1-10y3 == 2, 3, 5, 6, 8, 9. + * + * An interleaveSize alters the order of frames when iterating over the FrameRange. The iterator + * will first produce the list of frames from inTime to outTime with a stepSize equal to + * interleaveSize. The interleaveSize is then divided in half, producing another set of frames + * unique from the first set. This process is repeated until interleaveSize reaches 1. + * + * Example: 1-10:5 == 1, 6, 3, 5 ,7 ,9, 2, 4, 8, 10. + */ + public FrameRange(String frameRange) { + frameList = parseFrameRange(frameRange); + } + + /** + * Gets the number of frames contained in this sequence. + * + * @return + */ + public int size() { + return frameList.size(); + } + + /** + * Gets an individual entry in the sequence, by numerical position. + * + * @param idx + * @return + */ + public int get(int idx) { + return frameList.get(idx); + } + + /** + * Query index of frame number in frame set. + * + * @param idx + * @return Index of frame. -1 if frame set does not contain frame. + */ + public int index(int idx) { + return frameList.indexOf(idx); + } + + /** + * Gets the full numerical sequence. + * + * @return + */ + public ImmutableList getAll() { + return frameList; + } + + protected static ImmutableList parseFrameRange(String frameRange) { + Matcher singleFrameMatcher = SINGLE_FRAME_PATTERN.matcher(frameRange); + if (singleFrameMatcher.matches()) { + return ImmutableList.of(Integer.valueOf(frameRange)); } - /** - * Gets the number of frames contained in this sequence. - * @return - */ - public int size() { - return frameList.size(); + Matcher simpleRangeMatcher = SIMPLE_FRAME_RANGE_PATTERN.matcher(frameRange); + if (simpleRangeMatcher.matches()) { + Integer startFrame = Integer.valueOf(simpleRangeMatcher.group("sf")); + Integer endFrame = Integer.valueOf(simpleRangeMatcher.group("ef")); + return getIntRange(startFrame, endFrame, (endFrame >= startFrame ? 1 : -1)); } - /** - * Gets an individual entry in the sequence, by numerical position. - * @param idx - * @return - */ - public int get(int idx) { - return frameList.get(idx); + Matcher rangeWithStepMatcher = STEP_PATTERN.matcher(frameRange); + if (rangeWithStepMatcher.matches()) { + Integer startFrame = Integer.valueOf(rangeWithStepMatcher.group("sf")); + Integer endFrame = Integer.valueOf(rangeWithStepMatcher.group("ef")); + Integer step = Integer.valueOf(rangeWithStepMatcher.group("step")); + String stepSep = rangeWithStepMatcher.group("stepSep"); + return getSteppedRange(startFrame, endFrame, step, "y".equals(stepSep)); } - /** - * Query index of frame number in frame set. - * @param idx - * @return Index of frame. -1 if frame set does not contain frame. - */ - public int index(int idx) { - return frameList.indexOf(idx); + Matcher rangeWithInterleaveMatcher = INTERLEAVE_PATTERN.matcher(frameRange); + if (rangeWithInterleaveMatcher.matches()) { + Integer startFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("sf")); + Integer endFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("ef")); + Integer step = Integer.valueOf(rangeWithInterleaveMatcher.group("step")); + return getInterleavedRange(startFrame, endFrame, step); } - /** - * Gets the full numerical sequence. 
- * @return - */ - public ImmutableList getAll() { - return frameList; - } + throw new IllegalArgumentException("unrecognized frame range syntax " + frameRange); + } - protected static ImmutableList parseFrameRange(String frameRange) { - Matcher singleFrameMatcher = SINGLE_FRAME_PATTERN.matcher(frameRange); - if (singleFrameMatcher.matches()) { - return ImmutableList.of(Integer.valueOf(frameRange)); - } - - Matcher simpleRangeMatcher = SIMPLE_FRAME_RANGE_PATTERN.matcher(frameRange); - if (simpleRangeMatcher.matches()) { - Integer startFrame = Integer.valueOf(simpleRangeMatcher.group("sf")); - Integer endFrame = Integer.valueOf(simpleRangeMatcher.group("ef")); - return getIntRange(startFrame, endFrame, (endFrame >= startFrame ? 1 : -1)); - } - - Matcher rangeWithStepMatcher = STEP_PATTERN.matcher(frameRange); - if (rangeWithStepMatcher.matches()) { - Integer startFrame = Integer.valueOf(rangeWithStepMatcher.group("sf")); - Integer endFrame = Integer.valueOf(rangeWithStepMatcher.group("ef")); - Integer step = Integer.valueOf(rangeWithStepMatcher.group("step")); - String stepSep = rangeWithStepMatcher.group("stepSep"); - return getSteppedRange(startFrame, endFrame, step, "y".equals(stepSep)); - } - - Matcher rangeWithInterleaveMatcher = INTERLEAVE_PATTERN.matcher(frameRange); - if (rangeWithInterleaveMatcher.matches()) { - Integer startFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("sf")); - Integer endFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("ef")); - Integer step = Integer.valueOf(rangeWithInterleaveMatcher.group("step")); - return getInterleavedRange(startFrame, endFrame, step); - } - - throw new IllegalArgumentException("unrecognized frame range syntax " + frameRange); - } + private static ImmutableList getIntRange(Integer start, Integer end, Integer step) { + int streamStart = (step < 0 ? end : start); + int streamEnd = (step < 0 ? start : end); + int streamStep = abs(step); - private static ImmutableList getIntRange(Integer start, Integer end, Integer step) { - int streamStart = (step < 0 ? end : start); - int streamEnd = (step < 0 ? start : end); - int streamStep = abs(step); - - List intList = IntStream - .rangeClosed(streamStart, streamEnd) - .filter(n -> (n - start) % streamStep == 0) - .boxed() - .collect(Collectors.toList()); - - if (step < 0) { - return ImmutableList.copyOf(Lists.reverse(intList)); - } - return ImmutableList.copyOf(intList); - } + List intList = IntStream.rangeClosed(streamStart, streamEnd) + .filter(n -> (n - start) % streamStep == 0).boxed().collect(Collectors.toList()); - private static ImmutableList getSteppedRange( - Integer start, Integer end, Integer step, Boolean inverseStep) { - validateStepSign(start, end, step); - ImmutableList steppedRange = getIntRange(start, end, step); - if (inverseStep) { - ImmutableList fullRange = getIntRange(start, end, (step < 0 ? -1 : 1)); - return ImmutableList.copyOf(Collections2.filter(fullRange, Predicates.not(Predicates.in(steppedRange)))); - } - return steppedRange; + if (step < 0) { + return ImmutableList.copyOf(Lists.reverse(intList)); } + return ImmutableList.copyOf(intList); + } + + private static ImmutableList getSteppedRange(Integer start, Integer end, Integer step, + Boolean inverseStep) { + validateStepSign(start, end, step); + ImmutableList steppedRange = getIntRange(start, end, step); + if (inverseStep) { + ImmutableList fullRange = getIntRange(start, end, (step < 0 ? 
-1 : 1)); + return ImmutableList + .copyOf(Collections2.filter(fullRange, Predicates.not(Predicates.in(steppedRange)))); + } + return steppedRange; + } - private static ImmutableList getInterleavedRange(Integer start, Integer end, Integer step) { - validateStepSign(start, end, step); - Set interleavedFrames = new LinkedHashSet<>(); + private static ImmutableList getInterleavedRange(Integer start, Integer end, + Integer step) { + validateStepSign(start, end, step); + Set interleavedFrames = new LinkedHashSet<>(); - while (abs(step) > 0) { - interleavedFrames.addAll(getIntRange(start, end, step)); - step /= 2; - } - return ImmutableList.copyOf(interleavedFrames); + while (abs(step) > 0) { + interleavedFrames.addAll(getIntRange(start, end, step)); + step /= 2; } - - private static void validateStepSign(Integer start, Integer end, Integer step) { - if (step > 1) { - if (end < start) { - throw new IllegalArgumentException( - "end frame may not be less than start frame when using a positive step"); - } - } else if (step == 0) { - throw new IllegalArgumentException("step cannot be zero"); - - } else if (step < 0) { - if (end >= start) { - throw new IllegalArgumentException( - "end frame may not be greater than start frame when using a negative step"); - } - } + return ImmutableList.copyOf(interleavedFrames); + } + + private static void validateStepSign(Integer start, Integer end, Integer step) { + if (step > 1) { + if (end < start) { + throw new IllegalArgumentException( + "end frame may not be less than start frame when using a positive step"); + } + } else if (step == 0) { + throw new IllegalArgumentException("step cannot be zero"); + + } else if (step < 0) { + if (end >= start) { + throw new IllegalArgumentException( + "end frame may not be greater than start frame when using a negative step"); + } } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java b/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java index 731c2d2d3..ef6381e48 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java @@ -10,157 +10,164 @@ * Represents an ordered sequence of FrameRanges. */ public class FrameSet { - private ImmutableList frameList; - - /** - * Construct a FrameSet object by parsing a spec. - * - * See FrameRange for the supported syntax. A FrameSet follows the same syntax, - * with the addition that it may be a comma-separated list of different FrameRanges. - */ - public FrameSet(String frameRange) { - frameList = parseFrameRange(frameRange); + private ImmutableList frameList; + + /** + * Construct a FrameSet object by parsing a spec. + * + * See FrameRange for the supported syntax. A FrameSet follows the same syntax, with the addition + * that it may be a comma-separated list of different FrameRanges. + */ + public FrameSet(String frameRange) { + frameList = parseFrameRange(frameRange); + } + + /** + * Gets the number of frames contained in this sequence. + * + * @return + */ + public int size() { + return frameList.size(); + } + + /** + * Gets an individual entry in the sequence, by numerical position. + * + * @param idx + * @return + */ + public int get(int idx) { + return frameList.get(idx); + } + + /** + * Query index of frame number in frame set. + * + * @param idx + * @return Index of frame. -1 if frame set does not contain frame. + */ + public int index(int idx) { + return frameList.indexOf(idx); + } + + /** + * Gets the full numerical sequence. 
+ * + * @return + */ + public ImmutableList getAll() { + return frameList; + } + + private ImmutableList parseFrameRange(String frameRange) { + ImmutableList.Builder builder = ImmutableList.builder(); + for (String frameRangeSection : frameRange.split(",")) { + builder.addAll(FrameRange.parseFrameRange(frameRangeSection)); } - - /** - * Gets the number of frames contained in this sequence. - * @return - */ - public int size() { - return frameList.size(); + return builder.build(); + } + + /** + * Return a sub-FrameSet object starting at startFrame with max chunkSize members + * + * @param startFrameIndex Index of frame to start at; not the frame itself + * @param chunkSize Max number of frames per chunk + * @return String representation of the chunk, e.g. 1-1001x3 + */ + public String getChunk(int startFrameIndex, int chunkSize) { + if (frameList.size() <= startFrameIndex || startFrameIndex < 0) { + String sf = String.valueOf(startFrameIndex); + String sz = String.valueOf(frameList.size() - 1); + throw new IllegalArgumentException("startFrameIndex " + sf + " is not in range 0-" + sz); } - - /** - * Gets an individual entry in the sequence, by numerical position. - * @param idx - * @return - */ - public int get(int idx) { - return frameList.get(idx); + if (chunkSize == 1) { + // Chunksize of 1 so the FrameSet is just the startFrame + return String.valueOf(frameList.get(startFrameIndex)); } - - /** - * Query index of frame number in frame set. - * @param idx - * @return Index of frame. -1 if frame set does not contain frame. - */ - public int index(int idx) { - return frameList.indexOf(idx); + int finalFrameIndex = frameList.size() - 1; + int endFrameIndex = startFrameIndex + chunkSize - 1; + if (endFrameIndex > finalFrameIndex) { + // We don't have enough frames, so return the remaining frames. + endFrameIndex = finalFrameIndex; } - /** - * Gets the full numerical sequence. - * @return - */ - public ImmutableList getAll() { - return frameList; + return framesToFrameRanges(frameList.subList(startFrameIndex, endFrameIndex + 1)); + } + + /** + * Return a string representation of a subset of a frame range. + * + * This approach was adapted from https://pypi.org/project/Fileseq/ + * + * @param startFrame Start frame + * @param endFrame End frame + * @param step The step between frames + * @return String representation of the frame range, e.g. 1-1001x3 + */ + private String buildFrangePart(int startFrame, int endFrame, int step) { + if (startFrame == endFrame) { + return String.valueOf(startFrame); + } else if (step == 1) { + return String.format("%d-%d", startFrame, endFrame); + } else { + return String.format("%d-%dx%d", startFrame, endFrame, step); } - - private ImmutableList parseFrameRange(String frameRange) { - ImmutableList.Builder builder = ImmutableList.builder(); - for (String frameRangeSection : frameRange.split(",")) { - builder.addAll(FrameRange.parseFrameRange(frameRangeSection)); - } - return builder.build(); + } + + /** + * Return a String representation of a frame range based on a list of literal integer frame IDs. + * + * @param frames List of integers representing frame IDs, + * @return String representation of a frameset, e.g. 
'1-10,12-100x2' + */ + private String framesToFrameRanges(ImmutableList frames) { + int l = frames.size(); + if (l == 0) { + return ""; + } else if (l == 1) { + return String.valueOf(frames.get(0)); } - /** - * Return a sub-FrameSet object starting at startFrame with max chunkSize members - * @param startFrameIndex Index of frame to start at; not the frame itself - * @param chunkSize Max number of frames per chunk - * @return String representation of the chunk, e.g. 1-1001x3 - */ - public String getChunk(int startFrameIndex, int chunkSize) { - if (frameList.size() <= startFrameIndex || startFrameIndex < 0) { - String sf = String.valueOf(startFrameIndex); - String sz = String.valueOf(frameList.size() - 1); - throw new IllegalArgumentException("startFrameIndex " + sf + " is not in range 0-" + sz); - } - if (chunkSize == 1) { - // Chunksize of 1 so the FrameSet is just the startFrame - return String.valueOf(frameList.get(startFrameIndex)); - } - int finalFrameIndex = frameList.size() - 1; - int endFrameIndex = startFrameIndex + chunkSize - 1; - if (endFrameIndex > finalFrameIndex) { - // We don't have enough frames, so return the remaining frames. - endFrameIndex = finalFrameIndex; - } - - return framesToFrameRanges(frameList.subList(startFrameIndex, endFrameIndex+1)); + StringJoiner resultBuilder = new StringJoiner(","); + + int curr_count = 1; + int curr_step = 0; + int new_step = 0; + int curr_start = frames.get(0); + int curr_frame = frames.get(0); + int last_frame = frames.get(0); + + for (int i = 1; i < frames.size(); i++) { + curr_frame = frames.get(i); + + if (curr_step == 0) { + curr_step = curr_frame - curr_start; + } + new_step = curr_frame - last_frame; + if (curr_step == new_step) { + last_frame = curr_frame; + curr_count += 1; + } else if (curr_count == 2 && curr_step != 1) { + resultBuilder.add(String.valueOf(curr_start)); + curr_step = 0; + curr_start = last_frame; + last_frame = curr_frame; + } else { + resultBuilder.add(buildFrangePart(curr_start, last_frame, curr_step)); + curr_step = 0; + curr_start = curr_frame; + last_frame = curr_frame; + curr_count = 1; + } } - - /** - * Return a string representation of a subset of a frame range. - * - * This approach was adapted from https://pypi.org/project/Fileseq/ - * @param startFrame Start frame - * @param endFrame End frame - * @param step The step between frames - * @return String representation of the frame range, e.g. 1-1001x3 - */ - private String buildFrangePart(int startFrame, int endFrame, int step) { - if (startFrame == endFrame) { - return String.valueOf(startFrame); - } else if (step == 1) { - return String.format("%d-%d", startFrame, endFrame); - } else { - return String.format("%d-%dx%d", startFrame, endFrame, step); - } + if (curr_count == 2 && curr_step != 1) { + resultBuilder.add(String.valueOf(curr_start)); + resultBuilder.add(String.valueOf(curr_frame)); + } else { + resultBuilder.add(buildFrangePart(curr_start, curr_frame, curr_step)); } - /** - * Return a String representation of a frame range based on a list of literal integer frame IDs. - * @param frames List of integers representing frame IDs, - * @return String representation of a frameset, e.g. 
'1-10,12-100x2' - */ - private String framesToFrameRanges(ImmutableList frames) { - int l = frames.size(); - if (l == 0) { - return ""; - } else if (l == 1) { - return String.valueOf(frames.get(0)); - } - - StringJoiner resultBuilder = new StringJoiner(","); - - int curr_count = 1; - int curr_step = 0; - int new_step = 0; - int curr_start = frames.get(0); - int curr_frame = frames.get(0); - int last_frame = frames.get(0); - - for (int i = 1; i < frames.size(); i++) { - curr_frame = frames.get(i); - - if (curr_step == 0) { - curr_step = curr_frame - curr_start; - } - new_step = curr_frame - last_frame; - if (curr_step == new_step) { - last_frame = curr_frame; - curr_count += 1; - } else if (curr_count == 2 && curr_step != 1) { - resultBuilder.add(String.valueOf(curr_start)); - curr_step = 0; - curr_start = last_frame; - last_frame = curr_frame; - } else { - resultBuilder.add(buildFrangePart(curr_start, last_frame, curr_step)); - curr_step = 0; - curr_start = curr_frame; - last_frame = curr_frame; - curr_count = 1; - } - } - if (curr_count == 2 && curr_step != 1) { - resultBuilder.add(String.valueOf(curr_start)); - resultBuilder.add(String.valueOf(curr_frame)); - } else { - resultBuilder.add(buildFrangePart(curr_start, curr_frame, curr_step)); - } - - return resultBuilder.toString(); - } + return resultBuilder.toString(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java index c223ebbc0..625732b8d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
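The FrameSet class reformatted above expands a comma-separated spec into an ordered frame list and, through getChunk and framesToFrameRanges, re-encodes slices of it as compact range strings for chunked dispatch. A small usage sketch; the expected values follow directly from the code above:

    import com.imageworks.spcue.util.FrameSet;

    public class FrameSetSketch {
        public static void main(String[] args) {
            FrameSet frames = new FrameSet("1-100");
            System.out.println(frames.size());           // 100
            System.out.println(frames.get(0));           // 1
            System.out.println(frames.index(42));        // 41 -- position of frame 42 in the sequence
            System.out.println(frames.getChunk(0, 10));  // "1-10"   -- first ten frames as a range
            System.out.println(frames.getChunk(95, 10)); // "96-100" -- clamped to the remaining frames
        }
    }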
*/ - package com.imageworks.spcue.util; import com.imageworks.spcue.JobDetail; @@ -28,42 +25,42 @@ @Component public class JobLogUtil { - @Autowired - private Environment env; + @Autowired + private Environment env; - public boolean createJobLogDirectory(String path) { - File f = new File(path); - f.mkdir(); - f.setWritable(true, false); - return f.isDirectory(); - } + public boolean createJobLogDirectory(String path) { + File f = new File(path); + f.mkdir(); + f.setWritable(true, false); + return f.isDirectory(); + } - public String getJobLogDir(String show, String shot, String os) { - StringBuilder sb = new StringBuilder(512); - sb.append(getJobLogRootDir(os)); - sb.append("/"); - sb.append(show); - sb.append("/"); - sb.append(shot); - sb.append("/logs"); - return sb.toString(); - } + public String getJobLogDir(String show, String shot, String os) { + StringBuilder sb = new StringBuilder(512); + sb.append(getJobLogRootDir(os)); + sb.append("/"); + sb.append(show); + sb.append("/"); + sb.append(shot); + sb.append("/logs"); + return sb.toString(); + } - public String getJobLogPath(JobDetail job) { - StringBuilder sb = new StringBuilder(512); - sb.append(getJobLogDir(job.showName, job.shot, job.os)); - sb.append("/"); - sb.append(job.name); - sb.append("--"); - sb.append(job.id); - return sb.toString(); - } + public String getJobLogPath(JobDetail job) { + StringBuilder sb = new StringBuilder(512); + sb.append(getJobLogDir(job.showName, job.shot, job.os)); + sb.append("/"); + sb.append(job.name); + sb.append("--"); + sb.append(job.id); + return sb.toString(); + } - public String getJobLogRootDir(String os) { - try { - return env.getRequiredProperty(String.format("log.frame-log-root.%s", os), String.class); - } catch (IllegalStateException e) { - return env.getRequiredProperty("log.frame-log-root.default_os", String.class); - } + public String getJobLogRootDir(String os) { + try { + return env.getRequiredProperty(String.format("log.frame-log-root.%s", os), String.class); + } catch (IllegalStateException e) { + return env.getRequiredProperty("log.frame-log-root.default_os", String.class); } -} \ No newline at end of file + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java index ddfbabc61..a5036b567 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
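JobLogUtil above assembles frame log locations from the log.frame-log-root.* properties plus the job's show and shot. A sketch of the resulting layout, assuming a hypothetical property value of log.frame-log-root.linux=/shots:

    // With log.frame-log-root.linux=/shots configured (hypothetical value):
    // jobLogUtil.getJobLogDir("myshow", "sh010", "linux")  ->  "/shots/myshow/sh010/logs"
    // jobLogUtil.getJobLogPath(job)                        ->  "/shots/myshow/sh010/logs/<job.name>--<job.id>"
    // getJobLogRootDir falls back to log.frame-log-root.default_os when no OS-specific property is set.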
See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.util; import com.google.protobuf.ByteString; @@ -27,121 +23,120 @@ public class SqlUtil { - public static String buildBindVariableArray(String col, Integer numValues) { - StringBuilder sb = new StringBuilder(1024); - sb.append(col); - sb.append(" IN ("); - for (int i = 0; i < numValues; i++) { - sb.append("?,"); - } - sb.delete(sb.length() - 1, sb.length()); - sb.append(")"); - return sb.toString(); - } - - /** - * returns a 32 character UUID string that will be identical everytime its - * generated based on the name passed in. - * - * @param name String - * @return String - */ - public static String genShortKeyByName(String name) { - return UUID.nameUUIDFromBytes(name.getBytes()).toString().replaceAll("-", ""); - } - - /** - * returns a 32 character UUID string that will be identical everytime its - * generated based on the name passed in. - * - * @param name String - * @return String - */ - public static String genShortKeyByNameAndTime(String name) { - StringBuilder sb = new StringBuilder(64); - sb.append(name); - sb.append(System.currentTimeMillis()); - return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString().replaceAll("-", ""); - } - - /** - * returns a random UUID - * - * @return String - */ - public static String genKeyRandom() { - return UUID.randomUUID().toString(); - } - - /** - * returns a 36 character UUID string that will be identical everytime its - * generated based on the name passed in. - * - * @param name String - * @return String - */ - public static String genKeyByName(String name) { - return UUID.nameUUIDFromBytes(name.getBytes()).toString(); + public static String buildBindVariableArray(String col, Integer numValues) { + StringBuilder sb = new StringBuilder(1024); + sb.append(col); + sb.append(" IN ("); + for (int i = 0; i < numValues; i++) { + sb.append("?,"); } - - /** - * returns a 36 character UUID string that is based on the name and the time - * the UUID is created - * - * @param name String - * @return String - */ - public static String genKeyByNameAndTime(String name) { - StringBuilder sb = new StringBuilder(64); - sb.append(name); - sb.append(System.currentTimeMillis()); - sb.append(System.getenv("HOSTNAME")); - return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString(); + sb.delete(sb.length() - 1, sb.length()); + sb.append(")"); + return sb.toString(); + } + + /** + * returns a 32 character UUID string that will be identical everytime its generated based on the + * name passed in. + * + * @param name String + * @return String + */ + public static String genShortKeyByName(String name) { + return UUID.nameUUIDFromBytes(name.getBytes()).toString().replaceAll("-", ""); + } + + /** + * returns a 32 character UUID string that will be identical everytime its generated based on the + * name passed in. + * + * @param name String + * @return String + */ + public static String genShortKeyByNameAndTime(String name) { + StringBuilder sb = new StringBuilder(64); + sb.append(name); + sb.append(System.currentTimeMillis()); + return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString().replaceAll("-", ""); + } + + /** + * returns a random UUID + * + * @return String + */ + public static String genKeyRandom() { + return UUID.randomUUID().toString(); + } + + /** + * returns a 36 character UUID string that will be identical everytime its generated based on the + * name passed in. 
+ * + * @param name String + * @return String + */ + public static String genKeyByName(String name) { + return UUID.nameUUIDFromBytes(name.getBytes()).toString(); + } + + /** + * returns a 36 character UUID string that is based on the name and the time the UUID is created + * + * @param name String + * @return String + */ + public static String genKeyByNameAndTime(String name) { + StringBuilder sb = new StringBuilder(64); + sb.append(name); + sb.append(System.currentTimeMillis()); + sb.append(System.getenv("HOSTNAME")); + return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString(); + } + + /** + * returns a 36 character UUID string that is based on time and the IP address of the primary + * network interface and the time + * + * @return String + */ + public static String genKeyByTime() { + String name = System.getenv("HOSTNAME") + System.currentTimeMillis(); + return UUID.nameUUIDFromBytes(name.getBytes()).toString(); + } + + /** + * SQL ResultSet.getString method returns a null, but we want to initialize our message builders + * with an empty string. Call rs.getString but return an empty string instead of null.] + * + * @param rs ResultSet + * @param field String + * @return String + */ + public static String getString(ResultSet rs, String field) throws SQLException { + String value = rs.getString(field); + if (rs.wasNull()) { + return ""; + } else { + return value; } - - /** - * returns a 36 character UUID string that is based on time and the IP - * address of the primary network interface and the time - * - * @return String - */ - public static String genKeyByTime() { - String name = System.getenv("HOSTNAME") + System.currentTimeMillis(); - return UUID.nameUUIDFromBytes(name.getBytes()).toString(); + } + + public static String getString(ResultSet rs, int index) throws SQLException { + String value = rs.getString(index); + if (rs.wasNull()) { + return ""; + } else { + return value; } - - /** - * SQL ResultSet.getString method returns a null, but we want to initialize our - * message builders with an empty string. Call rs.getString but return an empty string instead of null.] 
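SqlUtil above covers two small jobs: sizing an IN (...) placeholder list and generating deterministic, name-based UUID keys. A brief usage sketch; the outputs follow from the code above:

    import com.imageworks.spcue.util.SqlUtil;

    public class SqlUtilSketch {
        public static void main(String[] args) {
            // Placeholder list sized to the number of bind values: "pk_job IN (?,?,?)"
            System.out.println(SqlUtil.buildBindVariableArray("pk_job", 3));

            // Name-based keys are stable: the same input always produces the same UUID.
            System.out.println(SqlUtil.genKeyByName("render-host-01"));      // 36 chars, with dashes
            System.out.println(SqlUtil.genShortKeyByName("render-host-01")); // 32 chars, dashes stripped

            // The random and time-based variants differ between calls.
            System.out.println(SqlUtil.genKeyRandom());
        }
    }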
- * @param rs ResultSet - * @param field String - * @return String - */ - public static String getString(ResultSet rs, String field) throws SQLException { - String value = rs.getString(field); - if (rs.wasNull()) { - return ""; - } else { - return value; - } - } - - public static String getString(ResultSet rs, int index) throws SQLException { - String value = rs.getString(index); - if (rs.wasNull()) { - return ""; - } else { - return value; - } - } - - public static ByteString getByteString(ResultSet rs, String field) throws SQLException { - byte[] data = rs.getBytes(field); - if (rs.wasNull()) { - return ByteString.copyFrom("".getBytes()); - } else { - return ByteString.copyFrom(data); - } + } + + public static ByteString getByteString(ResultSet rs, String field) throws SQLException { + byte[] data = rs.getBytes(field); + if (rs.wasNull()) { + return ByteString.copyFrom("".getBytes()); + } else { + return ByteString.copyFrom(data); } + } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java index a43698f70..606f64c61 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.util; import java.util.ArrayList; @@ -26,28 +22,25 @@ public class TagUtil { - /** - * This will take the RQD tags and convert them - * into something usable for now until the RQD - * tag standard is set. - * - * @param host - * @return - */ - public static List buildHardwareTags(RenderHost host) { - List tags = new ArrayList(); - if (host.getTagsList().contains("linux")) { - tags.add("linux"); - } + /** + * This will take the RQD tags and convert them into something usable for now until the RQD tag + * standard is set. 
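TagUtil.buildHardwareTags above maps RQD's reported tags onto scheduler tags: a linux host keeps its linux tag, and a 64bit host is treated as capable of both 64bit and 32bit work. A usage sketch (the RenderHost import path is assumed from the generated gRPC report classes):

    import java.util.List;

    import com.imageworks.spcue.grpc.report.RenderHost;
    import com.imageworks.spcue.util.TagUtil;

    public class TagUtilSketch {
        public static void main(String[] args) {
            RenderHost host = RenderHost.newBuilder()
                    .setName("beta01")
                    .addTags("linux")
                    .addTags("64bit")
                    .build();
            List<String> tags = TagUtil.buildHardwareTags(host);
            System.out.println(tags);  // [linux, 64bit, 32bit]; without "64bit" it would be [linux, 32bit]
        }
    }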
+ * + * @param host + * @return + */ + public static List buildHardwareTags(RenderHost host) { + List tags = new ArrayList(); + if (host.getTagsList().contains("linux")) { + tags.add("linux"); + } - if (host.getTagsList().contains("64bit")) { - tags.add("64bit"); - tags.add("32bit"); - } - else { - tags.add("32bit"); - } - return tags; + if (host.getTagsList().contains("64bit")) { + tags.add("64bit"); + tags.add("32bit"); + } else { + tags.add("32bit"); } + return tags; + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java b/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java index e86126839..50873050a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java +++ b/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.config; import javax.sql.DataSource; @@ -32,23 +28,20 @@ @Configuration @ImportResource({"classpath:conf/spring/applicationContext-assumptions.xml", - "classpath:conf/spring/applicationContext-dbEngine.xml", - "classpath:conf/spring/applicationContext-grpc.xml", - "classpath:conf/spring/applicationContext-grpcServer.xml", - "classpath:conf/spring/applicationContext-service.xml", - "classpath:conf/spring/applicationContext-jms.xml", - "classpath:conf/spring/applicationContext-criteria.xml"}) + "classpath:conf/spring/applicationContext-dbEngine.xml", + "classpath:conf/spring/applicationContext-grpc.xml", + "classpath:conf/spring/applicationContext-grpcServer.xml", + "classpath:conf/spring/applicationContext-service.xml", + "classpath:conf/spring/applicationContext-jms.xml", + "classpath:conf/spring/applicationContext-criteria.xml"}) @EnableConfigurationProperties @PropertySource({"classpath:opencue.properties"}) public class TestAppConfig { - @Configuration - @Conditional(PostgresDatabaseCondition.class) - @ImportResource({ - "classpath:conf/spring/applicationContext-postgres-datasource.xml", - "classpath:conf/spring/applicationContext-dao-postgres.xml" - }) - static class PostgresEngineConfig {} + @Configuration + @Conditional(PostgresDatabaseCondition.class) + @ImportResource({"classpath:conf/spring/applicationContext-postgres-datasource.xml", + "classpath:conf/spring/applicationContext-dao-postgres.xml"}) + static class PostgresEngineConfig { + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java b/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java index 427f2977a..02de12538 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test; import org.junit.AssumptionViolatedException; @@ -25,35 +22,32 @@ import com.imageworks.spcue.config.DatabaseEngine; - public class AssumingPostgresEngine implements TestRule { - private DatabaseEngine dbEngine; - - public AssumingPostgresEngine() { - } - - @Override - public Statement apply(Statement base, Description description) { - return new Statement() { - @Override - public void evaluate() throws Throwable { - if (dbEngine == DatabaseEngine.POSTGRES) { - base.evaluate(); - } else { - throw new AssumptionViolatedException( - "Current database engine is " + dbEngine.toString() + - ", test requires POSTGRES. Skipping"); - } - } - }; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + private DatabaseEngine dbEngine; + + public AssumingPostgresEngine() {} + + @Override + public Statement apply(Statement base, Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + if (dbEngine == DatabaseEngine.POSTGRES) { + base.evaluate(); + } else { + throw new AssumptionViolatedException("Current database engine is " + dbEngine.toString() + + ", test requires POSTGRES. Skipping"); + } + } + }; + } + + public DatabaseEngine getDbEngine() { + return dbEngine; + } + + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java index 651a1f0c1..6a4727662 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test; import org.junit.Test; @@ -26,51 +22,50 @@ import junit.framework.TestCase; /** - * Some tests for the com.imageworks.spcue.Entity class - * which is the base class for all entities used internally. + * Some tests for the com.imageworks.spcue.Entity class which is the base class for all entities + * used internally. 
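AssumingPostgresEngine above is a JUnit TestRule: when the configured engine is anything other than POSTGRES it throws AssumptionViolatedException, so JUnit reports the guarded tests as skipped rather than failed. A hypothetical wiring sketch (class and field names are illustrative; in the real tests the rule's dbEngine is injected through the Spring context):

    import org.junit.Rule;
    import org.junit.Test;

    import com.imageworks.spcue.test.AssumingPostgresEngine;

    public class SomePostgresOnlyTests {

        // dbEngine is normally populated via setDbEngine() from the Spring context.
        @Rule
        public AssumingPostgresEngine assumePostgres = new AssumingPostgresEngine();

        @Test
        public void runsOnlyAgainstPostgres() {
            // Evaluated only when the engine is POSTGRES; otherwise the rule skips the test.
        }
    }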
*/ public class EntityTests extends TestCase { - @Test - public void testEntityEquality() { - Entity a = new Entity("id","name"); - Entity b = new Entity("id","name"); - assertEquals(a,b); - - a = new Entity("id","name"); - b = new Entity("id_a","name"); - assertFalse(a.equals(b)); - - a = new Entity("id","name"); - b = new Entity("id_a","name_a"); - assertFalse(a.equals(b)); - } - - @Test - public void testEntityHashCode() { - - Entity a = new Entity("id","name"); - Entity b = new Entity("id","name"); - assertEquals(a.hashCode(), b.hashCode()); - - a = new Entity("id","name"); - b = new Entity("id_a","name"); - assertFalse(a.hashCode() == b.hashCode()); - - a = new Entity(); - b = new Entity(); - assertFalse(a.hashCode() == b.hashCode()); - } - - @Test - public void testEntityToString() { - Entity a = new Entity("id","name"); - Entity b = new Entity("id","name"); - assertEquals(a.toString(), b.toString()); - - a = new Entity("id_a","name"); - b = new Entity("id","name"); - assertNotSame(a.toString(), b.toString()); - } + @Test + public void testEntityEquality() { + Entity a = new Entity("id", "name"); + Entity b = new Entity("id", "name"); + assertEquals(a, b); + + a = new Entity("id", "name"); + b = new Entity("id_a", "name"); + assertFalse(a.equals(b)); + + a = new Entity("id", "name"); + b = new Entity("id_a", "name_a"); + assertFalse(a.equals(b)); + } + + @Test + public void testEntityHashCode() { + + Entity a = new Entity("id", "name"); + Entity b = new Entity("id", "name"); + assertEquals(a.hashCode(), b.hashCode()); + + a = new Entity("id", "name"); + b = new Entity("id_a", "name"); + assertFalse(a.hashCode() == b.hashCode()); + + a = new Entity(); + b = new Entity(); + assertFalse(a.hashCode() == b.hashCode()); + } + + @Test + public void testEntityToString() { + Entity a = new Entity("id", "name"); + Entity b = new Entity("id", "name"); + assertEquals(a.toString(), b.toString()); + + a = new Entity("id_a", "name"); + b = new Entity("id", "name"); + assertNotSame(a.toString(), b.toString()); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java index 49513708b..794f6b638 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java @@ -1,17 +1,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. 
See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.test; @@ -28,51 +26,49 @@ import java.util.concurrent.atomic.AtomicBoolean; public final class TestDatabaseSetupPostgres { - private static final String DB_NAME = "postgres"; - private static final String USERNAME = "postgres"; - private static AtomicBoolean setupComplete = new AtomicBoolean(false); - private EmbeddedPostgres postgres; + private static final String DB_NAME = "postgres"; + private static final String USERNAME = "postgres"; + private static AtomicBoolean setupComplete = new AtomicBoolean(false); + private EmbeddedPostgres postgres; - public TestDatabaseSetupPostgres() {} + public TestDatabaseSetupPostgres() {} - public String getUrl() { - return postgres.getJdbcUrl(USERNAME, DB_NAME); - } - - public String getUsername() { - return USERNAME; - } + public String getUrl() { + return postgres.getJdbcUrl(USERNAME, DB_NAME); + } - public String getPassword() { - return null; - } + public String getUsername() { + return USERNAME; + } - public void create() throws Exception { - if (!setupComplete.compareAndSet(false, true)) { - return; - } + public String getPassword() { + return null; + } - postgres = EmbeddedPostgres.start(); - Flyway flyway = Flyway.configure() - .dataSource(postgres.getPostgresDatabase()) - .locations("classpath:conf/ddl/postgres/migrations") - .load(); - flyway.migrate(); - - populateTestData(); + public void create() throws Exception { + if (!setupComplete.compareAndSet(false, true)) { + return; } - private void populateTestData() throws Exception { - Connection conn = postgres.getPostgresDatabase().getConnection(); + postgres = EmbeddedPostgres.start(); + Flyway flyway = Flyway.configure().dataSource(postgres.getPostgresDatabase()) + .locations("classpath:conf/ddl/postgres/migrations").load(); + flyway.migrate(); + + populateTestData(); + } - URL url = Resources.getResource("conf/ddl/postgres/test_data.sql"); - List testDataStatements = Resources.readLines(url, Charsets.UTF_8); - for (String testDataStatement : testDataStatements) { - Statement st = conn.createStatement(); - st.execute(testDataStatement); - st.close(); - } + private void populateTestData() throws Exception { + Connection conn = postgres.getPostgresDatabase().getConnection(); - conn.close(); + URL url = Resources.getResource("conf/ddl/postgres/test_data.sql"); + List testDataStatements = Resources.readLines(url, Charsets.UTF_8); + for (String testDataStatement : testDataStatements) { + Statement st = conn.createStatement(); + st.execute(testDataStatement); + st.close(); } + + conn.close(); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java b/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java index 5f66b3e74..a21eb36ff 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
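TestDatabaseSetupPostgres above boots an embedded Postgres, applies the Flyway migrations, and loads test_data.sql, with an AtomicBoolean guard so the setup runs only once. A minimal usage sketch:

    import com.imageworks.spcue.test.TestDatabaseSetupPostgres;

    public class EmbeddedDbSketch {
        public static void main(String[] args) throws Exception {
            TestDatabaseSetupPostgres db = new TestDatabaseSetupPostgres();
            db.create();                          // starts embedded Postgres, migrates, seeds test data
            System.out.println(db.getUrl());      // JDBC URL of the embedded instance
            System.out.println(db.getUsername()); // "postgres"; getPassword() returns null
            // Further create() calls on this instance are no-ops thanks to the AtomicBoolean guard.
        }
    }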
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test; import org.junit.Test; @@ -28,13 +24,12 @@ import com.imageworks.spcue.config.TestAppConfig; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class TransactionalTest extends AbstractTransactionalJUnit4SpringContextTests { - @Test - public void testInit() { + @Test + public void testInit() { - } + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java index 1e8f01669..adc9e202b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.test.dao.criteria; @@ -55,198 +53,188 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FrameSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; - - @Resource - JobDao jobDao; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - WhiteboardDao whiteboardDao; - - @Resource - JobManager jobManager; - - @Before - public void launchTestJobs() { - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File( - classLoader.getResource("conf/jobspec/jobspec_depend_test.xml").getFile()); - - jobLauncher.testMode = true; - jobLauncher.launch(file); - } - - @Test - @Transactional - @Rollback - public void testGetCriteria() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - FrameSearchCriteria criteria = FrameSearchInterface.criteriaFactory(); - - FrameSearchInterface frameSearch = frameSearchFactory.create(job, criteria); - - assertEquals(criteria, frameSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testSetCriteria() { - FrameSearchCriteria criteria = FrameSearchInterface.criteriaFactory() - .toBuilder() - .setFrameRange("1-10") - .build(); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - - // Ensure we can distinguish between the default and non-default criteria. - assertNotEquals(criteria, frameSearch.getCriteria()); - - frameSearch.setCriteria(criteria); - - assertEquals(criteria, frameSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrameIds() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameInterface frame1 = frameDao.findFrame(layer, 1); - FrameInterface frame2 = frameDao.findFrame(layer, 2); - frameSearch.filterByFrameIds(ImmutableList.of(frame1.getFrameId(), frame2.getFrameId())); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertThat(frames).containsExactlyInAnyOrder(frame1, frame2); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrame() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameInterface frame1 = frameDao.findFrame(layer, 1); - frameSearch.filterByFrame(frame1); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertThat(frames).containsExactly(frame1); - } - - @Test - @Transactional - @Rollback - public void testFilterByJob() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - String jobId = job.getJobId(); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByJob(job); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); 
- - assertEquals(20, frames.size()); - assertTrue(frames.stream().allMatch(frame -> frame.getJobId().equals(jobId))); - } - - @Test - @Transactional - @Rollback - public void testFilterByLayer() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByLayer(layer); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertTrue( - frames.stream().allMatch(frame -> frame.getLayerId().equals(layer.getLayerId()))); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrameStates() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_b"); - LayerInterface layer = layerDao.getLayers(job).get(1); - IntStream.range(1, 11).forEach( - i -> frameDao.updateFrameState(frameDao.findFrame(layer, i), FrameState.SUCCEEDED)); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByFrameStates(ImmutableList.of(FrameState.SUCCEEDED)); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertEquals(10, frames.size()); - assertTrue( - frames.stream().allMatch( - frame -> frameDao.getFrameDetail( - frame.getFrameId()).state.equals(FrameState.SUCCEEDED))); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrameSet() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByFrameSet("5-6"); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertEquals(8, frames.size()); - assertThat( - frames.stream().map( - frame -> frameDao.getFrameDetail(frame.getFrameId()).number) - .collect(Collectors.toList())) - .containsOnly(5, 6); - } - - @Test - @Transactional - @Rollback - public void filterByMemoryRange() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - LayerInterface layer = layerDao.getLayers(job).get(0); - IntStream.range(1, 11).forEach( - i -> { - FrameInterface frame = frameDao.findFrame(layer, i); - frameDao.updateFrameState(frame, FrameState.RUNNING); - frameDao.updateFrameMemoryUsageAndLluTime(frame, CueUtil.GB * 5, CueUtil.GB, 0); - }); - - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByMemoryRange("4.2-7.1"); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrameDetail(frame.getId())).collect(Collectors.toList()); - - assertEquals(10, frames.size()); - assertTrue(frames.stream().allMatch(frame -> frame.maxRss == CueUtil.GB * 5)); - } - - // TODO(bcipriano) Add filterByDurationRange and filterByChangeDate tests. 
+ @Resource + JobLauncher jobLauncher; + + @Resource + JobDao jobDao; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + WhiteboardDao whiteboardDao; + + @Resource + JobManager jobManager; + + @Before + public void launchTestJobs() { + ClassLoader classLoader = getClass().getClassLoader(); + File file = new File(classLoader.getResource("conf/jobspec/jobspec_depend_test.xml").getFile()); + + jobLauncher.testMode = true; + jobLauncher.launch(file); + } + + @Test + @Transactional + @Rollback + public void testGetCriteria() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + FrameSearchCriteria criteria = FrameSearchInterface.criteriaFactory(); + + FrameSearchInterface frameSearch = frameSearchFactory.create(job, criteria); + + assertEquals(criteria, frameSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testSetCriteria() { + FrameSearchCriteria criteria = + FrameSearchInterface.criteriaFactory().toBuilder().setFrameRange("1-10").build(); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + + // Ensure we can distinguish between the default and non-default criteria. + assertNotEquals(criteria, frameSearch.getCriteria()); + + frameSearch.setCriteria(criteria); + + assertEquals(criteria, frameSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrameIds() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameInterface frame1 = frameDao.findFrame(layer, 1); + FrameInterface frame2 = frameDao.findFrame(layer, 2); + frameSearch.filterByFrameIds(ImmutableList.of(frame1.getFrameId(), frame2.getFrameId())); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertThat(frames).containsExactlyInAnyOrder(frame1, frame2); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrame() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameInterface frame1 = frameDao.findFrame(layer, 1); + frameSearch.filterByFrame(frame1); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertThat(frames).containsExactly(frame1); + } + + @Test + @Transactional + @Rollback + public void testFilterByJob() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + String jobId = job.getJobId(); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByJob(job); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertEquals(20, frames.size()); + assertTrue(frames.stream().allMatch(frame -> frame.getJobId().equals(jobId))); + } + + @Test + @Transactional + @Rollback + public void testFilterByLayer() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + 
frameSearch.filterByLayer(layer); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertTrue(frames.stream().allMatch(frame -> frame.getLayerId().equals(layer.getLayerId()))); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrameStates() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_b"); + LayerInterface layer = layerDao.getLayers(job).get(1); + IntStream.range(1, 11).forEach( + i -> frameDao.updateFrameState(frameDao.findFrame(layer, i), FrameState.SUCCEEDED)); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByFrameStates(ImmutableList.of(FrameState.SUCCEEDED)); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertEquals(10, frames.size()); + assertTrue(frames.stream().allMatch( + frame -> frameDao.getFrameDetail(frame.getFrameId()).state.equals(FrameState.SUCCEEDED))); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrameSet() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByFrameSet("5-6"); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertEquals(8, frames.size()); + assertThat(frames.stream().map(frame -> frameDao.getFrameDetail(frame.getFrameId()).number) + .collect(Collectors.toList())).containsOnly(5, 6); + } + + @Test + @Transactional + @Rollback + public void filterByMemoryRange() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + LayerInterface layer = layerDao.getLayers(job).get(0); + IntStream.range(1, 11).forEach(i -> { + FrameInterface frame = frameDao.findFrame(layer, i); + frameDao.updateFrameState(frame, FrameState.RUNNING); + frameDao.updateFrameMemoryUsageAndLluTime(frame, CueUtil.GB * 5, CueUtil.GB, 0); + }); + + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByMemoryRange("4.2-7.1"); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrameDetail(frame.getId())).collect(Collectors.toList()); + + assertEquals(10, frames.size()); + assertTrue(frames.stream().allMatch(frame -> frame.maxRss == CueUtil.GB * 5)); + } + + // TODO(bcipriano) Add filterByDurationRange and filterByChangeDate tests. } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java index 5c08c6bfd..7a4682152 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ package com.imageworks.spcue.test.dao.criteria; @@ -45,67 +43,63 @@ import static org.junit.Assert.assertEquals; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class HostSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - HostSearchFactory hostSearchFactory; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - WhiteboardDao whiteboardDao; - - private AllocationEntity createAlloc(FacilityInterface facility, String allocName) { - AllocationEntity alloc = new AllocationEntity(); - alloc.name = allocName; - alloc.tag = "test-tag"; - adminManager.createAllocation(facility, alloc); - return alloc; - } - - private DispatchHost createHost(AllocationEntity alloc, String hostName) { - DispatchHost host = hostManager.createHost( - RenderHost.newBuilder() - .setName(hostName) - .setTotalMem(50000000) - .build()); - hostManager.setAllocation(host, alloc); - return host; - } - - @Test - @Transactional - @Rollback - public void testGetCriteria() { - HostSearchCriteria criteria = HostSearchInterface.criteriaFactory(); - - HostSearchInterface hostSearch = hostSearchFactory.create(criteria); - - assertEquals(criteria, hostSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testFilterByAlloc() { - FacilityInterface facility = adminManager.createFacility("test-facility"); - AllocationEntity alloc1 = createAlloc(facility, "test-alloc-01"); - AllocationEntity alloc2 = createAlloc(facility, "test-alloc-02"); - DispatchHost expectedHost = createHost(alloc1, "test-host-01"); - createHost(alloc2, "test-host-02"); - HostSearchInterface hostSearch = hostSearchFactory.create( - HostSearchInterface.criteriaFactory()); - hostSearch.filterByAlloc(alloc1); - - List hosts = whiteboardDao.getHosts(hostSearch).getHostsList(); - - assertThat( - hosts.stream().map(Host::getId).collect(Collectors.toList())) - .containsOnly(expectedHost.getHostId()); - } + @Resource + HostSearchFactory hostSearchFactory; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + WhiteboardDao whiteboardDao; + + private AllocationEntity createAlloc(FacilityInterface facility, String allocName) { + AllocationEntity alloc = new AllocationEntity(); + alloc.name = allocName; + alloc.tag = "test-tag"; + adminManager.createAllocation(facility, alloc); + return alloc; + } + + private DispatchHost createHost(AllocationEntity alloc, String hostName) { + DispatchHost host = hostManager + .createHost(RenderHost.newBuilder().setName(hostName).setTotalMem(50000000).build()); 
+ hostManager.setAllocation(host, alloc); + return host; + } + + @Test + @Transactional + @Rollback + public void testGetCriteria() { + HostSearchCriteria criteria = HostSearchInterface.criteriaFactory(); + + HostSearchInterface hostSearch = hostSearchFactory.create(criteria); + + assertEquals(criteria, hostSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testFilterByAlloc() { + FacilityInterface facility = adminManager.createFacility("test-facility"); + AllocationEntity alloc1 = createAlloc(facility, "test-alloc-01"); + AllocationEntity alloc2 = createAlloc(facility, "test-alloc-02"); + DispatchHost expectedHost = createHost(alloc1, "test-host-01"); + createHost(alloc2, "test-host-02"); + HostSearchInterface hostSearch = + hostSearchFactory.create(HostSearchInterface.criteriaFactory()); + hostSearch.filterByAlloc(alloc1); + + List hosts = whiteboardDao.getHosts(hostSearch).getHostsList(); + + assertThat(hosts.stream().map(Host::getId).collect(Collectors.toList())) + .containsOnly(expectedHost.getHostId()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java index 04b9d2ab1..46d2edfc2 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.test.dao.criteria; @@ -42,77 +40,71 @@ import static org.junit.Assert.assertNotEquals; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobSearchFactory jobSearchFactory; + @Resource + JobSearchFactory jobSearchFactory; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - WhiteboardDao whiteboardDao; + @Resource + WhiteboardDao whiteboardDao; - @Resource - ShowDao showDao; + @Resource + ShowDao showDao; - @Before - public void launchTestJobs() { - ClassLoader classLoader = getClass().getClassLoader(); - jobLauncher.testMode = true; + @Before + public void launchTestJobs() { + ClassLoader classLoader = getClass().getClassLoader(); + jobLauncher.testMode = true; - File file = new File( - classLoader.getResource("conf/jobspec/jobspec.xml").getFile()); - jobLauncher.launch(file); + File file = new File(classLoader.getResource("conf/jobspec/jobspec.xml").getFile()); + jobLauncher.launch(file); - file = new File( - classLoader.getResource("conf/jobspec/jobspec_other_show.xml").getFile()); - jobLauncher.launch(file); - } + file = new File(classLoader.getResource("conf/jobspec/jobspec_other_show.xml").getFile()); + jobLauncher.launch(file); + } - @Test - @Transactional - @Rollback - public void testGetCriteria() { - JobSearchCriteria criteria = JobSearchInterface.criteriaFactory(); + @Test + @Transactional + @Rollback + public void testGetCriteria() { + JobSearchCriteria criteria = JobSearchInterface.criteriaFactory(); - JobSearchInterface jobSearch = jobSearchFactory.create(criteria); + JobSearchInterface jobSearch = jobSearchFactory.create(criteria); - assertEquals(criteria, jobSearch.getCriteria()); - } + assertEquals(criteria, jobSearch.getCriteria()); + } - @Test - @Transactional - @Rollback - public void testSetCriteria() { - JobSearchCriteria criteria = JobSearchInterface.criteriaFactory() - .toBuilder() - .addIds("fake-job-id") - .build(); - JobSearchInterface jobSearch = jobSearchFactory.create(); + @Test + @Transactional + @Rollback + public void testSetCriteria() { + JobSearchCriteria criteria = + JobSearchInterface.criteriaFactory().toBuilder().addIds("fake-job-id").build(); + JobSearchInterface jobSearch = jobSearchFactory.create(); - // Ensure we can distinguish between the default and non-default criteria. - assertNotEquals(criteria, jobSearch.getCriteria()); + // Ensure we can distinguish between the default and non-default criteria. 
+ assertNotEquals(criteria, jobSearch.getCriteria()); - jobSearch.setCriteria(criteria); + jobSearch.setCriteria(criteria); - assertEquals(criteria, jobSearch.getCriteria()); - } + assertEquals(criteria, jobSearch.getCriteria()); + } - @Test - @Transactional - @Rollback - public void testFilterByShow() { - JobSearchCriteria criteria = JobSearchInterface.criteriaFactory() - .toBuilder() - .setIncludeFinished(true) - .build(); - JobSearchInterface jobSearch = jobSearchFactory.create(criteria); - jobSearch.filterByShow(showDao.findShowDetail("pipe")); + @Test + @Transactional + @Rollback + public void testFilterByShow() { + JobSearchCriteria criteria = + JobSearchInterface.criteriaFactory().toBuilder().setIncludeFinished(true).build(); + JobSearchInterface jobSearch = jobSearchFactory.create(criteria); + jobSearch.filterByShow(showDao.findShowDetail("pipe")); - List jobs = whiteboardDao.getJobs(jobSearch).getJobsList(); + List jobs = whiteboardDao.getJobs(jobSearch).getJobsList(); - assertEquals(1, jobs.size()); - } + assertEquals(1, jobs.size()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java index 78a13b321..45c4f462b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java @@ -2,17 +2,15 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ package com.imageworks.spcue.test.dao.criteria; @@ -57,196 +55,172 @@ import static org.junit.Assert.assertNotEquals; @Transactional -@ContextConfiguration(classes= TestAppConfig.class, loader= AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ProcSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - private static final String FIRST_HOST = "beta01"; - private static final String SECOND_HOST = "beta02"; - private static final String FIRST_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String SECOND_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - private static final String DEFAULT_GROUP_NAME = "pipe"; - private static final String NEW_GROUP_NAME = "arbitrary-group-name"; + private static final String FIRST_HOST = "beta01"; + private static final String SECOND_HOST = "beta02"; + private static final String FIRST_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String SECOND_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String DEFAULT_GROUP_NAME = "pipe"; + private static final String NEW_GROUP_NAME = "arbitrary-group-name"; - @Resource - ProcSearchFactory procSearchFactory; + @Resource + ProcSearchFactory procSearchFactory; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - WhiteboardDao whiteboardDao; + @Resource + WhiteboardDao whiteboardDao; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } - @Test - @Transactional - @Rollback - public void testGetCriteria() { - ProcSearchCriteria criteria = ProcSearchInterface.criteriaFactory(); + @Test + @Transactional + @Rollback + public void testGetCriteria() { + ProcSearchCriteria criteria = ProcSearchInterface.criteriaFactory(); - ProcSearchInterface procSearch = procSearchFactory.create(criteria); - - assertEquals(criteria, procSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testSetCriteria() { - ProcSearchCriteria criteria = ProcSearchInterface.criteriaFactory() - .toBuilder() - .addHosts("test-host") - .build(); - ProcSearchInterface procSearch = procSearchFactory.create(); - - // Ensure we can distinguish between the default and non-default criteria. 
- assertNotEquals(criteria, procSearch.getCriteria()); - - procSearch.setCriteria(criteria); - - assertEquals(criteria, procSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testNotJobs() { - createHostsJobsAndProcs(); - - JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); - ProcSearchInterface procSearch = procSearchFactory.create(); - procSearch.notJobs(ImmutableList.of(firstJob)); - - List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); - - assertEquals(1, foundProcs.size()); - assertThat( - foundProcs.stream().map(Proc::getJobName).collect(Collectors.toList())) - .containsOnly(SECOND_JOB); - } - - @Test - @Transactional - @Rollback - public void testNotGroups() { - createHostsJobsAndProcs(); - - JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); - GroupDetail newGroup = createGroup(whiteboardDao.getShow(firstJob.getShowId())); - Inherit[] emptyInherits = {}; - groupManager.reparentJob(firstJob, newGroup, emptyInherits); - - ProcSearchInterface procSearch = procSearchFactory.create(); - procSearch.notGroups(ImmutableList.of(newGroup)); - - List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); - - assertEquals(1, foundProcs.size()); - assertThat( - foundProcs.stream().map(Proc::getGroupName).collect(Collectors.toList())) - .containsOnly(DEFAULT_GROUP_NAME); - } - - @Test - @Transactional - @Rollback - public void testFilterByHost() { - createHostsJobsAndProcs(); - - ProcSearchInterface procSearch = procSearchFactory.create(); - procSearch.filterByHost(hostManager.findDispatchHost(FIRST_HOST)); - - List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); - - assertEquals(1, foundProcs.size()); - assertThat( - foundProcs.stream().map( - proc -> hostManager.getVirtualProc(proc.getId()).hostName) - .collect(Collectors.toList())) - .containsOnly(FIRST_HOST); - } - - // TODO: test by duration range - - private void createHostsJobsAndProcs() { - createHosts(); - launchJobs(); - - DispatchHost firstHost = hostManager.findDispatchHost(FIRST_HOST); - DispatchHost secondHost = hostManager.findDispatchHost(SECOND_HOST); - JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); - JobDetail secondJob = jobManager.findJobDetail(SECOND_JOB); - - dispatcher.dispatchHost(firstHost, firstJob); - dispatcher.dispatchHost(secondHost, secondJob); - } - - private void launchJobs() { - ClassLoader classLoader = getClass().getClassLoader(); - jobLauncher.testMode = true; - File file = new File( - classLoader.getResource("conf/jobspec/jobspec_dispatch_test.xml").getFile()); - jobLauncher.launch(file); - } - - private RenderHost.Builder buildRenderHost() { - return RenderHost.newBuilder() - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux"); - } - - private void createHosts() { - RenderHost host1 = buildRenderHost() - .setName(FIRST_HOST) - .build(); - RenderHost host2 = buildRenderHost() - .setName(SECOND_HOST) - .build(); - - hostManager.createHost(host1, - adminManager.findAllocationDetail("spi", "general")); - hostManager.createHost(host2, - adminManager.findAllocationDetail("spi", "general")); - } - - private GroupDetail createGroup(Show show) { - GroupDetail newGroupDetail = new GroupDetail(); - newGroupDetail.name = NEW_GROUP_NAME; - newGroupDetail.showId = show.getId(); - groupManager.createGroup(newGroupDetail, null); - return groupManager.getGroupDetail( - whiteboardDao.findGroup(show.getName(), NEW_GROUP_NAME).getId()); - } + ProcSearchInterface procSearch = procSearchFactory.create(criteria); + + assertEquals(criteria, procSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testSetCriteria() { + ProcSearchCriteria criteria = + ProcSearchInterface.criteriaFactory().toBuilder().addHosts("test-host").build(); + ProcSearchInterface procSearch = procSearchFactory.create(); + + // Ensure we can distinguish between the default and non-default criteria. + assertNotEquals(criteria, procSearch.getCriteria()); + + procSearch.setCriteria(criteria); + + assertEquals(criteria, procSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testNotJobs() { + createHostsJobsAndProcs(); + + JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); + ProcSearchInterface procSearch = procSearchFactory.create(); + procSearch.notJobs(ImmutableList.of(firstJob)); + + List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); + + assertEquals(1, foundProcs.size()); + assertThat(foundProcs.stream().map(Proc::getJobName).collect(Collectors.toList())) + .containsOnly(SECOND_JOB); + } + + @Test + @Transactional + @Rollback + public void testNotGroups() { + createHostsJobsAndProcs(); + + JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); + GroupDetail newGroup = createGroup(whiteboardDao.getShow(firstJob.getShowId())); + Inherit[] emptyInherits = {}; + groupManager.reparentJob(firstJob, newGroup, emptyInherits); + + ProcSearchInterface procSearch = procSearchFactory.create(); + procSearch.notGroups(ImmutableList.of(newGroup)); + + List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); + + assertEquals(1, foundProcs.size()); + assertThat(foundProcs.stream().map(Proc::getGroupName).collect(Collectors.toList())) + .containsOnly(DEFAULT_GROUP_NAME); + } + + @Test + @Transactional + @Rollback + public void testFilterByHost() { + createHostsJobsAndProcs(); + + ProcSearchInterface procSearch = procSearchFactory.create(); + procSearch.filterByHost(hostManager.findDispatchHost(FIRST_HOST)); + + List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); + + assertEquals(1, foundProcs.size()); + assertThat(foundProcs.stream().map(proc -> hostManager.getVirtualProc(proc.getId()).hostName) + .collect(Collectors.toList())).containsOnly(FIRST_HOST); + } + + // TODO: test by duration range + + private void createHostsJobsAndProcs() { + createHosts(); + launchJobs(); + + DispatchHost firstHost = hostManager.findDispatchHost(FIRST_HOST); + 
DispatchHost secondHost = hostManager.findDispatchHost(SECOND_HOST); + JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); + JobDetail secondJob = jobManager.findJobDetail(SECOND_JOB); + + dispatcher.dispatchHost(firstHost, firstJob); + dispatcher.dispatchHost(secondHost, secondJob); + } + + private void launchJobs() { + ClassLoader classLoader = getClass().getClassLoader(); + jobLauncher.testMode = true; + File file = + new File(classLoader.getResource("conf/jobspec/jobspec_dispatch_test.xml").getFile()); + jobLauncher.launch(file); + } + + private RenderHost.Builder buildRenderHost() { + return RenderHost.newBuilder().setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(1).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux"); + } + + private void createHosts() { + RenderHost host1 = buildRenderHost().setName(FIRST_HOST).build(); + RenderHost host2 = buildRenderHost().setName(SECOND_HOST).build(); + + hostManager.createHost(host1, adminManager.findAllocationDetail("spi", "general")); + hostManager.createHost(host2, adminManager.findAllocationDetail("spi", "general")); + } + + private GroupDetail createGroup(Show show) { + GroupDetail newGroupDetail = new GroupDetail(); + newGroupDetail.name = NEW_GROUP_NAME; + newGroupDetail.showId = show.getId(); + groupManager.createGroup(newGroupDetail, null); + return groupManager + .getGroupDetail(whiteboardDao.findGroup(show.getName(), NEW_GROUP_NAME).getId()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java index 71580beb6..ce40a5749 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -45,162 +42,157 @@ import static org.junit.Assert.assertEquals; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ActionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - ActionDao actionDao; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - GroupDao groupDao; - - @Resource - JobManager jobManager; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public FilterEntity buildFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.booleanValue = true; - a1.valueType = ActionValueType.BOOLEAN_TYPE; - actionDao.createAction(a1); - - ActionEntity a2 = new ActionEntity(); - a2.type = ActionType.MOVE_JOB_TO_GROUP; - a2.filterId = f.getFilterId(); - a2.groupValue = groupDao.getRootGroupId(getShow()); - a2.valueType = ActionValueType.GROUP_TYPE; - actionDao.createAction(a2); - - ActionEntity a3 = new ActionEntity(); - a3.type = ActionType.SET_JOB_MAX_CORES; - a3.filterId = f.getFilterId(); - a3.floatValue = 1f; - a3.valueType = ActionValueType.FLOAT_TYPE; - actionDao.createAction(a3); - - ActionEntity a4 = new ActionEntity(); - a4.type = ActionType.SET_JOB_MIN_CORES; - a4.filterId = f.getFilterId(); - a4.floatValue = 1; - a4.valueType = ActionValueType.FLOAT_TYPE; - actionDao.createAction(a4); - - ActionEntity a5 = new ActionEntity(); - a5.type = ActionType.STOP_PROCESSING; - a5.filterId = f.getFilterId(); - a5.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a5); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAction() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - actionDao.deleteAction(a); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - actionDao.getAction(a); - actionDao.getAction(a.getActionId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.name = null; - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - - a.floatValue = 1f; - a.type = ActionType.SET_JOB_MIN_CORES; - a.valueType = ActionValueType.FLOAT_TYPE; - - actionDao.updateAction(a); - - assertEquals(Integer.valueOf(1), - 
jdbcTemplate.queryForObject( - "SELECT float_value FROM action WHERE pk_action=?", - Integer.class, a.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActions() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.name = null; - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - - actionDao.getActions(f); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ActionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + ActionDao actionDao; + + @Resource + FilterDao filterDao; + + @Resource + ShowDao showDao; + + @Resource + GroupDao groupDao; + + @Resource + JobManager jobManager; + + private static String FILTER_NAME = "test_filter"; + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public FilterEntity buildFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = "00000000-0000-0000-0000-000000000000"; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testCreateAction() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.PAUSE_JOB; + a1.filterId = f.getFilterId(); + a1.booleanValue = true; + a1.valueType = ActionValueType.BOOLEAN_TYPE; + actionDao.createAction(a1); + + ActionEntity a2 = new ActionEntity(); + a2.type = ActionType.MOVE_JOB_TO_GROUP; + a2.filterId = f.getFilterId(); + a2.groupValue = groupDao.getRootGroupId(getShow()); + a2.valueType = ActionValueType.GROUP_TYPE; + actionDao.createAction(a2); + + ActionEntity a3 = new ActionEntity(); + a3.type = ActionType.SET_JOB_MAX_CORES; + a3.filterId = f.getFilterId(); + a3.floatValue = 1f; + a3.valueType = ActionValueType.FLOAT_TYPE; + actionDao.createAction(a3); + + ActionEntity a4 = new ActionEntity(); + a4.type = ActionType.SET_JOB_MIN_CORES; + a4.filterId = f.getFilterId(); + a4.floatValue = 1; + a4.valueType = ActionValueType.FLOAT_TYPE; + actionDao.createAction(a4); + + ActionEntity a5 = new ActionEntity(); + a5.type = ActionType.STOP_PROCESSING; + a5.filterId = f.getFilterId(); + a5.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a5); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteAction() { + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + actionDao.deleteAction(a); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAction() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + actionDao.getAction(a); + actionDao.getAction(a.getActionId()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAction() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = 
ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.name = null; + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + + a.floatValue = 1f; + a.type = ActionType.SET_JOB_MIN_CORES; + a.valueType = ActionValueType.FLOAT_TYPE; + + actionDao.updateAction(a); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT float_value FROM action WHERE pk_action=?", Integer.class, a.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetActions() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.name = null; + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + + actionDao.getActions(f); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java index bbc6914a4..f0f07a78e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -43,165 +40,153 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class AllocationDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - AllocationDao allocDao; - - @Resource - FacilityDao facilityDao; - - @Resource - AdminManager adminManager; - - public static final String ALLOC_FQN = "spi.test_alloc"; - public static final String ALLOC_NAME = "test_alloc"; - public static final String ALLOC_TAG = "test"; - - private AllocationEntity alloc; - - @Before - public void before() { - - alloc = new AllocationEntity(); - alloc.name = ALLOC_NAME; - alloc.tag = ALLOC_TAG; - - allocDao.insertAllocation( - facilityDao.getFacility("spi"), alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocation() { - allocDao.getAllocationEntity(alloc.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation() { - FacilityInterface f = facilityDao.getFacility("spi"); - allocDao.findAllocationEntity(f.getName(), ALLOC_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation2() { - FacilityInterface f = facilityDao.getFacility("spi"); - allocDao.findAllocationEntity(ALLOC_FQN); - } - - - @Test - @Transactional - @Rollback(true) - public void testDeleteAllocation() { - allocDao.deleteAllocation(alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAllocationWithProc() { - - // Use the alloc so deleting triggers it just to be disaled. - ShowEntity show = adminManager.getShowEntity( - "00000000-0000-0000-0000-000000000000"); - adminManager.createSubscription(show, alloc, 10, 10); - allocDao.deleteAllocation(alloc); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = false", - Integer.class, alloc.getAllocationId())); - - assertEquals(ALLOC_FQN, jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=? AND b_enabled = false", - String.class, alloc.getAllocationId())); - - // Now re-enable it. - allocDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? 
AND b_enabled = true", - Integer.class, alloc.getAllocationId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationName() { - allocDao.updateAllocationName(alloc, "frickjack"); - assertEquals("spi.frickjack", jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=?", - String.class, - alloc.getId())); - } - - @Test(expected = IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateAllocationNameBad() { - allocDao.updateAllocationName(alloc, "spi.frickjack"); - assertEquals("spi.frickjack", jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=?", - String.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationTag() { - allocDao.updateAllocationTag(alloc, "foo"); - assertEquals("foo",jdbcTemplate.queryForObject( - "SELECT str_tag FROM alloc WHERE pk_alloc=?", - String.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationBillable() { - allocDao.updateAllocationBillable(alloc, false); - - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_billable FROM alloc WHERE pk_alloc=?", - Boolean.class, alloc.getId())); - - allocDao.updateAllocationBillable(alloc, true); - - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_billable FROM alloc WHERE pk_alloc=?", - Boolean.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testSetDefaultAllocation() { - AllocationEntity newAlloc = new AllocationEntity(); - newAlloc.name = "spi.new_alloc"; - newAlloc.tag = "new_alloc"; - allocDao.insertAllocation( - facilityDao.getFacility("spi"), newAlloc); - - allocDao.setDefaultAllocation(newAlloc); - AllocationEntity defaultAlloc = allocDao.getDefaultAllocationEntity(); - assertEquals(newAlloc.getAllocationId(), defaultAlloc.getAllocationId()); - assertEquals(newAlloc.name, defaultAlloc.name); - assertEquals(newAlloc.tag, defaultAlloc.tag); - assertEquals( - facilityDao.getFacility("spi").getFacilityId(), - defaultAlloc.getFacilityId()); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class AllocationDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + AllocationDao allocDao; + + @Resource + FacilityDao facilityDao; + + @Resource + AdminManager adminManager; + + public static final String ALLOC_FQN = "spi.test_alloc"; + public static final String ALLOC_NAME = "test_alloc"; + public static final String ALLOC_TAG = "test"; + + private AllocationEntity alloc; + + @Before + public void before() { + + alloc = new AllocationEntity(); + alloc.name = ALLOC_NAME; + alloc.tag = ALLOC_TAG; + + allocDao.insertAllocation(facilityDao.getFacility("spi"), alloc); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAllocation() { + allocDao.getAllocationEntity(alloc.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindAllocation() { + FacilityInterface f = facilityDao.getFacility("spi"); + allocDao.findAllocationEntity(f.getName(), ALLOC_NAME); + } + + @Test + @Transactional + @Rollback(true) + public void testFindAllocation2() { + FacilityInterface f = facilityDao.getFacility("spi"); + allocDao.findAllocationEntity(ALLOC_FQN); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteAllocation() { + allocDao.deleteAllocation(alloc); + } + + @Test + 
@Transactional + @Rollback(true) + public void testDeleteAllocationWithProc() { + + // Use the alloc so deleting triggers it just to be disaled. + ShowEntity show = adminManager.getShowEntity("00000000-0000-0000-0000-000000000000"); + adminManager.createSubscription(show, alloc, 10, 10); + allocDao.deleteAllocation(alloc); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = false", Integer.class, + alloc.getAllocationId())); + + assertEquals(ALLOC_FQN, + jdbcTemplate.queryForObject( + "SELECT str_name FROM alloc WHERE pk_alloc=? AND b_enabled = false", String.class, + alloc.getAllocationId())); + + // Now re-enable it. + allocDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = true", Integer.class, + alloc.getAllocationId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAllocationName() { + allocDao.updateAllocationName(alloc, "frickjack"); + assertEquals("spi.frickjack", jdbcTemplate.queryForObject( + "SELECT str_name FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); + } + + @Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testUpdateAllocationNameBad() { + allocDao.updateAllocationName(alloc, "spi.frickjack"); + assertEquals("spi.frickjack", jdbcTemplate.queryForObject( + "SELECT str_name FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAllocationTag() { + allocDao.updateAllocationTag(alloc, "foo"); + assertEquals("foo", jdbcTemplate.queryForObject("SELECT str_tag FROM alloc WHERE pk_alloc=?", + String.class, alloc.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAllocationBillable() { + allocDao.updateAllocationBillable(alloc, false); + + assertFalse(jdbcTemplate.queryForObject("SELECT b_billable FROM alloc WHERE pk_alloc=?", + Boolean.class, alloc.getId())); + + allocDao.updateAllocationBillable(alloc, true); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_billable FROM alloc WHERE pk_alloc=?", + Boolean.class, alloc.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testSetDefaultAllocation() { + AllocationEntity newAlloc = new AllocationEntity(); + newAlloc.name = "spi.new_alloc"; + newAlloc.tag = "new_alloc"; + allocDao.insertAllocation(facilityDao.getFacility("spi"), newAlloc); + + allocDao.setDefaultAllocation(newAlloc); + AllocationEntity defaultAlloc = allocDao.getDefaultAllocationEntity(); + assertEquals(newAlloc.getAllocationId(), defaultAlloc.getAllocationId()); + assertEquals(newAlloc.name, defaultAlloc.name); + assertEquals(newAlloc.tag, defaultAlloc.tag); + assertEquals(facilityDao.getFacility("spi").getFacilityId(), defaultAlloc.getFacilityId()); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java index 577b53eac..fe173ba3d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -56,402 +52,357 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class BookingDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - HostDao hostDao; - - @Resource - BookingDao bookingDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - ProcDao procDao; - - @Resource - Whiteboard whiteboard; - - public DispatchHost createHost() { - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("general") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } - - @Test - @Transactional - @Rollback(true) - public void insertLocalJobAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(1); - lja.setThreads(2); - - bookingDao.insertLocalHostAssignment(h, j, lja); - - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void insertLocalLayerAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - LayerInterface layer = jobManager.getLayers(j).get(0); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(1); - lja.setThreads(2); - - bookingDao.insertLocalHostAssignment(h, layer, lja); - - assertEquals(layer.getLayerId(), jdbcTemplate.queryForObject( - "SELECT pk_layer FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(RenderPartitionType.LAYER_PARTITION.toString(), - jdbcTemplate.queryForObject( - "SELECT str_type FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", - Long.class, 
j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void insertLocalFrameAssignment() { +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class BookingDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - DispatchHost h = createHost(); - JobDetail j = launchJob(); - LayerInterface layer = jobManager.getLayers(j).get(0); - FrameInterface frame = jobManager.findFrame(layer, 1); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(1); - lja.setThreads(2); - - bookingDao.insertLocalHostAssignment(h, frame, lja); - - assertEquals(frame.getFrameId(), jdbcTemplate.queryForObject( - "SELECT pk_frame FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(RenderPartitionType.FRAME_PARTITION.toString(), - jdbcTemplate.queryForObject( - "SELECT str_type FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLocalJobAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - - LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), - j.getJobId()); - - assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); - assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); - assertEquals(lja.getThreads(), lja2.getThreads()); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetRenderPartition() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - 
lja.setMaxGpuMemory(1); + @Resource + HostManager hostManager; - bookingDao.insertLocalHostAssignment(h, j, lja); + @Resource + AdminManager adminManager; - LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), - j.getJobId()); + @Resource + JobLauncher jobLauncher; - assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); - assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getThreads(), lja2.getThreads()); - assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); + @Resource + JobManager jobManager; - RenderPartition rp = whiteboard.getRenderPartition(lja2); + @Resource + HostDao hostDao; - assertEquals(lja2.getMaxCoreUnits(), rp.getMaxCores()); - assertEquals(lja2.getMaxMemory(), rp.getMaxMemory()); - assertEquals(lja2.getThreads(), rp.getThreads()); - logger.info("--------------------"); - logger.info(lja2.getMaxGpuMemory()); - logger.info(rp.getMaxGpuMemory()); - assertEquals(lja2.getMaxGpuMemory(), rp.getMaxGpuMemory()); - assertEquals(h.getName(), rp.getHost()); - assertEquals(j.getName(), rp.getJob()); - } + @Resource + BookingDao bookingDao; - @Test - @Transactional - @Rollback(true) - public void testGetProcs() { + @Resource + DispatcherDao dispatcherDao; - DispatchHost h = createHost(); - JobDetail j = launchJob(); + @Resource + ProcDao procDao; - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + @Resource + Whiteboard whiteboard; - bookingDao.insertLocalHostAssignment(h, j, lja); + public DispatchHost createHost() { + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").addTags("general").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - assertEquals(0, procDao.findVirtualProcs(lja).size()); - } + return dh; + } - @Test - @Transactional - @Rollback(true) - public void updateMaxCores() { + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.setJobPaused(d, false); + return d; + } - DispatchHost h = createHost(); - JobDetail j = launchJob(); + @Test + @Transactional + @Rollback(true) + public void insertLocalJobAssignment() { - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + DispatchHost h = createHost(); + JobDetail j = launchJob(); - bookingDao.insertLocalHostAssignment(h, j, lja); - assertTrue(bookingDao.updateMaxCores(lja, 100)); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_host=?", - Integer.class, h.getHostId())); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(1); + lja.setThreads(2); - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + bookingDao.insertLocalHostAssignment(h, j, lja); - assertEquals(100, lj2.getIdleCoreUnits()); - assertEquals(100, lj2.getMaxCoreUnits()); + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - bookingDao.updateMaxCores(lja, 200); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - lj2 = bookingDao.getLocalJobAssignment(lja.id); + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_cores_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - assertEquals(200, lj2.getIdleCoreUnits()); - assertEquals(200, lj2.getMaxCoreUnits()); - } + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - @Test - @Transactional - @Rollback(true) - public void updateMaxMemory() { + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_cores_idle FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + } - bookingDao.insertLocalHostAssignment(h, 
j, lja); - bookingDao.updateMaxMemory(lja, CueUtil.GB2); + @Test + @Transactional + @Rollback(true) + public void insertLocalLayerAssignment() { - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + DispatchHost h = createHost(); + JobDetail j = launchJob(); + LayerInterface layer = jobManager.getLayers(j).get(0); - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(1); + lja.setThreads(2); - bookingDao.updateMaxMemory(lja, CueUtil.GB4); + bookingDao.insertLocalHostAssignment(h, layer, lja); - lj2 = bookingDao.getLocalJobAssignment(lja.id); + assertEquals(layer.getLayerId(), jdbcTemplate.queryForObject( + "SELECT pk_layer FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); - assertEquals(CueUtil.GB4, lj2.getIdleMemory()); - assertEquals(CueUtil.GB4, lj2.getMaxMemory()); -} + assertEquals(RenderPartitionType.LAYER_PARTITION.toString(), jdbcTemplate.queryForObject( + "SELECT str_type FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); - @Test - @Transactional - @Rollback(true) - public void updateMaxGpuMemory() { + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_cores_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - bookingDao.insertLocalHostAssignment(h, j, lja); - bookingDao.updateMaxMemory(lja, CueUtil.GB2); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_cores_idle FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(1, lj2.getMaxGpuMemory()); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + } - bookingDao.updateMaxGpuMemory(lja, 2); + @Test + @Transactional + @Rollback(true) + public void insertLocalFrameAssignment() { - lj2 = bookingDao.getLocalJobAssignment(lja.id); + DispatchHost h = createHost(); + JobDetail j = launchJob(); + LayerInterface layer = jobManager.getLayers(j).get(0); + FrameInterface frame = jobManager.findFrame(layer, 1); - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(2, lj2.getMaxGpuMemory()); - } -} + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(1); + lja.setThreads(2); + + bookingDao.insertLocalHostAssignment(h, frame, lja); + + assertEquals(frame.getFrameId(), jdbcTemplate.queryForObject( + "SELECT pk_frame FROM host_local WHERE 
pk_host_local=?", String.class, lja.getId())); + + assertEquals(RenderPartitionType.FRAME_PARTITION.toString(), jdbcTemplate.queryForObject( + "SELECT str_type FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_cores_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_cores_idle FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLocalJobAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); + + bookingDao.insertLocalHostAssignment(h, j, lja); + + LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), j.getJobId()); + + assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); + assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); + assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); + assertEquals(lja.getThreads(), lja2.getThreads()); + + } + + @Test + @Transactional + @Rollback(true) + public void testGetRenderPartition() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); + + bookingDao.insertLocalHostAssignment(h, j, lja); + + LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), j.getJobId()); + + assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); + assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); + assertEquals(lja.getThreads(), lja2.getThreads()); + assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); + + RenderPartition rp = whiteboard.getRenderPartition(lja2); + + assertEquals(lja2.getMaxCoreUnits(), rp.getMaxCores()); + assertEquals(lja2.getMaxMemory(), rp.getMaxMemory()); + assertEquals(lja2.getThreads(), rp.getThreads()); + logger.info("--------------------"); + logger.info(lja2.getMaxGpuMemory()); + logger.info(rp.getMaxGpuMemory()); + assertEquals(lja2.getMaxGpuMemory(), rp.getMaxGpuMemory()); + assertEquals(h.getName(), rp.getHost()); + assertEquals(j.getName(), rp.getJob()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetProcs() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); + + bookingDao.insertLocalHostAssignment(h, j, lja); + + assertEquals(0, procDao.findVirtualProcs(lja).size()); + } + + @Test + @Transactional + @Rollback(true) + public void 
updateMaxCores() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); + + bookingDao.insertLocalHostAssignment(h, j, lja); + assertTrue(bookingDao.updateMaxCores(lja, 100)); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_cores_max FROM host_local WHERE pk_host=?", Integer.class, h.getHostId())); + + LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + + assertEquals(100, lj2.getIdleCoreUnits()); + assertEquals(100, lj2.getMaxCoreUnits()); + + bookingDao.updateMaxCores(lja, 200); + + lj2 = bookingDao.getLocalJobAssignment(lja.id); + + assertEquals(200, lj2.getIdleCoreUnits()); + assertEquals(200, lj2.getMaxCoreUnits()); + } + + @Test + @Transactional + @Rollback(true) + public void updateMaxMemory() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); + + bookingDao.insertLocalHostAssignment(h, j, lja); + bookingDao.updateMaxMemory(lja, CueUtil.GB2); + + LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + + assertEquals(CueUtil.GB2, lj2.getIdleMemory()); + assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + + bookingDao.updateMaxMemory(lja, CueUtil.GB4); + + lj2 = bookingDao.getLocalJobAssignment(lja.id); + + assertEquals(CueUtil.GB4, lj2.getIdleMemory()); + assertEquals(CueUtil.GB4, lj2.getMaxMemory()); + } + + @Test + @Transactional + @Rollback(true) + public void updateMaxGpuMemory() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); + + bookingDao.insertLocalHostAssignment(h, j, lja); + bookingDao.updateMaxMemory(lja, CueUtil.GB2); + + LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + + assertEquals(CueUtil.GB2, lj2.getIdleMemory()); + assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + assertEquals(1, lj2.getMaxGpuMemory()); + + bookingDao.updateMaxGpuMemory(lja, 2); + + lj2 = bookingDao.getLocalJobAssignment(lja.id); + + assertEquals(CueUtil.GB2, lj2.getIdleMemory()); + assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + assertEquals(2, lj2.getMaxGpuMemory()); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java index 9282d7b79..5bdfd6544 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -49,191 +45,177 @@ import static org.junit.Assert.assertNotNull; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class CommentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - CommentDao commentDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Before - public void testMode() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteComment() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - commentDao.deleteComment(d.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetComment() { +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class CommentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + @Resource + CommentDao commentDao; - commentDao.insertComment(job, d); + @Resource + JobManager jobManager; - CommentDetail nd = commentDao.getCommentDetail(d.getId()); + @Resource + JobLauncher jobLauncher; - assertEquals(d.message,nd.message); - assertEquals(d.subject,nd.subject); - assertEquals(d.user,nd.user); - } + @Resource + HostManager hostManager; - @Test - @Transactional - @Rollback(true) - public void testInsertCommentOnJob() { + @Before + public void testMode() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + } - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + @Test + @Transactional + @Rollback(true) + public void testDeleteComment() { - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - commentDao.insertComment(job, d); + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - CommentDetail nd = 
commentDao.getCommentDetail(d.getId()); + commentDao.insertComment(job, d); + commentDao.deleteComment(d.getId()); + } - assertEquals(d.message,nd.message); - assertEquals(d.subject,nd.subject); - assertEquals(d.user,nd.user); - } + @Test + @Transactional + @Rollback(true) + public void testGetComment() { - @Test - @Transactional - @Rollback(true) - public void testInsertCommentOnHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("boo") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(15290520) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(15290520) - .setTotalSwap(2096) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(400) - .addTags("linux") - .setState(HardwareState.UP) - .setFacility("spi") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - DispatchHost h = hostManager.createHost(host); - commentDao.insertComment(h, d); - - assertNotNull(d.id); - - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - - assertEquals(d.message,nd.message); - assertEquals(d.subject,nd.subject); - assertEquals(d.user,nd.user); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateComment() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - - d.message = "no"; - d.subject = "no"; - - commentDao.updateComment(d); - - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - - assertEquals("no",nd.message); - assertEquals("no",nd.subject); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateCommentMessage() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - commentDao.updateCommentMessage(d.getId(), "no"); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no",nd.message); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateCommentSubject() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - commentDao.updateCommentSubject(d.getId(), "no"); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no",nd.subject); - } -} + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; + + commentDao.insertComment(job, d); + + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + + assertEquals(d.message, nd.message); + assertEquals(d.subject, nd.subject); + assertEquals(d.user, nd.user); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertCommentOnJob() { + + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; + + 
commentDao.insertComment(job, d); + + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + + assertEquals(d.message, nd.message); + assertEquals(d.subject, nd.subject); + assertEquals(d.user, nd.user); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertCommentOnHost() { + + RenderHost host = RenderHost.newBuilder().setName("boo").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(15290520).setTotalSwap(2096).setNimbyEnabled(false) + .setNumProcs(2).setCoresPerProc(400).addTags("linux").setState(HardwareState.UP) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512) + .build(); + + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; + + DispatchHost h = hostManager.createHost(host); + commentDao.insertComment(h, d); + + assertNotNull(d.id); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + + assertEquals(d.message, nd.message); + assertEquals(d.subject, nd.subject); + assertEquals(d.user, nd.user); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateComment() { + + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; + + commentDao.insertComment(job, d); + + d.message = "no"; + d.subject = "no"; + + commentDao.updateComment(d); + + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + + assertEquals("no", nd.message); + assertEquals("no", nd.subject); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateCommentMessage() { + + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; + + commentDao.insertComment(job, d); + commentDao.updateCommentMessage(d.getId(), "no"); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + assertEquals("no", nd.message); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateCommentSubject() { + + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; + + commentDao.insertComment(job, d); + commentDao.updateCommentSubject(d.getId(), "no"); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + assertEquals("no", nd.subject); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java index 962b669bb..2ea1d215d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -49,122 +45,104 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DeedDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - OwnerManager ownerManager; - - @Resource - DeedDao deedDao; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(15290520) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("general") - .setState(HardwareState.UP) - .setFacility("spi") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testInsertDeed() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertEquals(host.getName(), d.host); - } - - @Test - @Transactional - @Rollback(true) - public void tesDeleteDeed() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertTrue(deedDao.deleteDeed(d)); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertFalse(deedDao.deleteDeed(d)); - } - - @Test - @Transactional - @Rollback(true) - public void tesGetDeed() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, 
host); - - DeedEntity d2 = deedDao.getDeed(d.id); - - assertEquals(d, d2); - } - - @Test - @Transactional - @Rollback(true) - public void tesGetDeeds() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - assertEquals(1, deedDao.getDeeds(o).size()); - assertEquals(d, deedDao.getDeeds(o).get(0)); - } -} +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class DeedDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + OwnerManager ownerManager; + + @Resource + DeedDao deedDao; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("general") + .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + @Test + @Transactional + @Rollback(true) + public void testInsertDeed() { + + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); + assertEquals(host.getName(), d.host); + } + + @Test + @Transactional + @Rollback(true) + public void tesDeleteDeed() { + + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); + + assertTrue(deedDao.deleteDeed(d)); + + assertEquals(Integer.valueOf(0), jdbcTemplate + .queryForObject("SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); + + assertFalse(deedDao.deleteDeed(d)); + } + + @Test + @Transactional + @Rollback(true) + public void tesGetDeed() { + + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); + + DeedEntity d2 = deedDao.getDeed(d.id); + + assertEquals(d, d2); + } + + @Test + @Transactional + @Rollback(true) + public void tesGetDeeds() { + + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); + + assertEquals(1, deedDao.getDeeds(o).size()); + assertEquals(d, deedDao.getDeeds(o).get(0)); + } +} diff --git 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java index afd60adb5..b11af87dd 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -40,73 +36,71 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DepartmentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DepartmentDao departmentDao; - - @Resource - AdminManager adminManager; - - - @Test - @Transactional - @Rollback(true) - public void testGetDepartment() { - String dept= "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; - assertEquals(dept, departmentDao.getDepartment(dept).getId()); - assertEquals(dept, departmentDao.getDepartment(dept).getDepartmentId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDepartment() { - String dept= "Hair"; - assertEquals(dept, departmentDao.findDepartment(dept).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testgetDefaultDepartment() { - assertEquals(jdbcTemplate.queryForObject( - "SELECT pk_dept FROM dept WHERE b_default=true", - String.class),departmentDao.getDefaultDepartment().getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testDepartmentExists() { - String dept= "Cloth"; - assertTrue(departmentDao.departmentExists(dept)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertDepartment() { - String deptName = "TestDept"; - departmentDao.insertDepartment(deptName); - DepartmentInterface d = departmentDao.findDepartment(deptName); - assertEquals(d.getName(), deptName); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteDepartment() { - String deptName = "TestDept"; - departmentDao.insertDepartment(deptName); - DepartmentInterface d = departmentDao.findDepartment(deptName); - assertEquals(d.getName(), deptName); - departmentDao.deleteDepartment(d); - } 
+@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class DepartmentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DepartmentDao departmentDao; + + @Resource + AdminManager adminManager; + + @Test + @Transactional + @Rollback(true) + public void testGetDepartment() { + String dept = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; + assertEquals(dept, departmentDao.getDepartment(dept).getId()); + assertEquals(dept, departmentDao.getDepartment(dept).getDepartmentId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDepartment() { + String dept = "Hair"; + assertEquals(dept, departmentDao.findDepartment(dept).getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testgetDefaultDepartment() { + assertEquals( + jdbcTemplate.queryForObject("SELECT pk_dept FROM dept WHERE b_default=true", String.class), + departmentDao.getDefaultDepartment().getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testDepartmentExists() { + String dept = "Cloth"; + assertTrue(departmentDao.departmentExists(dept)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertDepartment() { + String deptName = "TestDept"; + departmentDao.insertDepartment(deptName); + DepartmentInterface d = departmentDao.findDepartment(deptName); + assertEquals(d.getName(), deptName); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteDepartment() { + String deptName = "TestDept"; + departmentDao.insertDepartment(deptName); + DepartmentInterface d = departmentDao.findDepartment(deptName); + assertEquals(d.getName(), deptName); + departmentDao.deleteDepartment(d); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java index d7cb39c2d..698f162ce 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -62,394 +59,391 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DependDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DependDao dependDao; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - DependManager dependManager; - - @Resource - JobManagerSupport jobManagerSupport; - - @Resource - JobLauncher jobLauncher; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); - } - - public JobDetail getJobA() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); - } - - public JobDetail getJobB() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); - JobOnLayer depend = new JobOnLayer(job_a, layer); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - JobOnFrame depend = new JobOnFrame(job_a, frame); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); - - LayerOnJob depend = new LayerOnJob(layer, job_a); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = 
layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - LayerOnFrame depend = new LayerOnFrame(layer, frame); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnJob depend = new FrameOnJob(frame, job_a); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnLayer depend = new FrameOnLayer(frame,layer); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameByFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_BY_FRAME, 
lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertPreviousFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - PreviousFrame depend = new PreviousFrame(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.PREVIOUS_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testReinsertFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - - dependDao.setInactive(lwd); - - // Try to reinsert it now that the original is inactive. - depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnJob() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependDao.insertDepend(depend); - - assertEquals(1, dependDao.getWhatDependsOn(job_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(job_a).size()); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class DependDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DependDao dependDao; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + JobManager jobManager; + + @Resource + DependManager dependManager; + + @Resource + JobManagerSupport jobManagerSupport; + + @Resource + JobLauncher jobLauncher; + + @Before + public void launchTestJobs() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); + } + + public JobDetail getJobA() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); + } + + public JobDetail getJobB() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + JobOnJob depend = new JobOnJob(job_a, job_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.JOB_ON_JOB, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail 
job_b = getJobB(); + + LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); + JobOnLayer depend = new JobOnLayer(job_a, layer); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.JOB_ON_LAYER, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + JobOnFrame depend = new JobOnFrame(job_a, frame); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.JOB_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); + + LayerOnJob depend = new LayerOnJob(layer, job_a); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.LAYER_ON_JOB, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.LAYER_ON_LAYER, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + LayerOnFrame depend = new LayerOnFrame(layer, frame); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.LAYER_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFrameOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnJob depend = new FrameOnJob(frame, job_a); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_JOB, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void 
testInsertFrameOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnLayer depend = new FrameOnLayer(frame, layer); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_LAYER, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFrameOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); + FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFrameByFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_BY_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertPreviousFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + PreviousFrame depend = new PreviousFrame(layer_a, layer_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.PREVIOUS_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testReinsertFrameOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); + FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + + dependDao.setInactive(lwd); + + // Try to reinsert it now that the original is inactive. 
+ depend = new FrameOnFrame(frame_a, frame_b); + dependDao.insertDepend(depend); + } + + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnJob() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + JobOnJob depend = new JobOnJob(job_a, job_b); + dependDao.insertDepend(depend); + + assertEquals(1, dependDao.getWhatDependsOn(job_b).size()); + assertEquals(0, dependDao.getWhatDependsOn(job_a).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + dependDao.insertDepend(depend); + assertEquals(1, dependDao.getWhatDependsOn(layer_b).size()); + assertEquals(0, dependDao.getWhatDependsOn(layer_a).size()); + } - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnLayerInactive() { - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); - assertEquals(1, dependDao.getWhatDependsOn(layer_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(layer_a).size()); - } + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnLayerInactive() { + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + dependDao.insertDepend(depend); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); + dependDao.setInactive(dependDao.getDepend(depend.getId())); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + assertEquals(1, dependDao.getWhatDependsOn(layer_b, false).size()); + assertEquals(0, dependDao.getWhatDependsOn(layer_b, true).size()); + } - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnFrame() { - dependDao.setInactive(dependDao.getDepend(depend.getId())); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); - assertEquals(1, dependDao.getWhatDependsOn(layer_b, false).size()); - assertEquals(0, dependDao.getWhatDependsOn(layer_b, true).size()); - } + FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); + FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnFrame() { + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependDao.insertDepend(depend); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); + assertEquals(1, dependDao.getWhatDependsOn(frame_b).size()); + assertEquals(0, dependDao.getWhatDependsOn(frame_a).size()); + } - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnFrameInactive() { - FrameOnFrame depend 
= new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); - assertEquals(1, dependDao.getWhatDependsOn(frame_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(frame_a).size()); - } + FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); + FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnFrameInactive() { + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependDao.insertDepend(depend); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); + dependDao.setInactive(dependDao.getDepend(depend.getId())); - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - dependDao.setInactive(dependDao.getDepend(depend.getId())); - - assertEquals(1, dependDao.getWhatDependsOn(frame_b, false).size()); - assertEquals(0, dependDao.getWhatDependsOn(frame_b, true).size()); - assertEquals(0, dependDao.getWhatDependsOn(frame_a, true).size()); - } + assertEquals(1, dependDao.getWhatDependsOn(frame_b, false).size()); + assertEquals(0, dependDao.getWhatDependsOn(frame_b, true).size()); + assertEquals(0, dependDao.getWhatDependsOn(frame_a, true).size()); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java index c4fdfd892..c77f04915 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -66,178 +62,161 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DispatcherDaoFifoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - HostDao hostDao; - - @Resource - JobManager jobManager; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - JobLauncher jobLauncher; - - private static final String HOSTNAME="beta"; - - public DispatchHost getHost() { - return hostDao.findDispatchHost(HOSTNAME); - } - - private void launchJobs(int count) throws Exception { - Document docTemplate = new SAXBuilder(true).build( - new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - docTemplate.getDocType().setSystemID("http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); - Element root = docTemplate.getRootElement(); - Element jobTemplate = root.getChild("job"); - Element depends = root.getChild("depends"); - assertEquals(jobTemplate.getAttributeValue("name"), "test"); - root.removeContent(jobTemplate); - root.removeContent(depends); - - long t = System.currentTimeMillis(); - for (int i = 0; i < count; i++) { - Document doc = (Document) docTemplate.clone(); - root = doc.getRootElement(); - Element job = (Element) jobTemplate.clone(); - job.setAttribute("name", "job" + i); - root.addContent(job); - root.addContent((Element) depends.clone()); - jobLauncher.launch(new XMLOutputter().outputString(doc)); - - // Force to set incremental ts_started to the jobs - // because current_timestamp is not updated during test. - jdbcTemplate.update("UPDATE job SET ts_started = ? 
WHERE str_name = ?", - new Timestamp(t + i), "pipe-default-testuser_job" + i); - } - } - - @Before - public void launchJob() { - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); - - dispatcher.setTestMode(true); - jobLauncher.testMode = true; +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class DispatcherDaoFifoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DispatcherDao dispatcherDao; + + @Resource + HostDao hostDao; + + @Resource + JobManager jobManager; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + JobLauncher jobLauncher; + + private static final String HOSTNAME = "beta"; + + public DispatchHost getHost() { + return hostDao.findDispatchHost(HOSTNAME); + } + + private void launchJobs(int count) throws Exception { + Document docTemplate = + new SAXBuilder(true).build(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + docTemplate.getDocType().setSystemID("http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); + Element root = docTemplate.getRootElement(); + Element jobTemplate = root.getChild("job"); + Element depends = root.getChild("depends"); + assertEquals(jobTemplate.getAttributeValue("name"), "test"); + root.removeContent(jobTemplate); + root.removeContent(depends); + + long t = System.currentTimeMillis(); + for (int i = 0; i < count; i++) { + Document doc = (Document) docTemplate.clone(); + root = doc.getRootElement(); + Element job = (Element) jobTemplate.clone(); + job.setAttribute("name", "job" + i); + root.addContent(job); + root.addContent((Element) depends.clone()); + jobLauncher.launch(new XMLOutputter().outputString(doc)); + + // Force to set incremental ts_started to the jobs + // because current_timestamp is not updated during test. + jdbcTemplate.update("UPDATE job SET ts_started = ? WHERE str_name = ?", new Timestamp(t + i), + "pipe-default-testuser_job" + i); } - - @After - public void resetFifoScheduling() { - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testFifoSchedulingEnabled() { - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); - } - - @Test - @Transactional - @Rollback(true) - public void testAllSorted() throws Exception { - int count = 10; - launchJobs(count); - - Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); - assertEquals(count, jobs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testPortionSorted() throws Exception { - int count = 100; - launchJobs(count); - - int portion = 19; - Set jobs = dispatcherDao.findDispatchJobs(getHost(), (portion + 1) / 10); - assertEquals(portion, jobs.size()); + } + + @Before + public void launchJob() { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); + + dispatcher.setTestMode(true); + jobLauncher.testMode = true; + } + + @After + public void resetFifoScheduling() { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(2).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux").build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingEnabled() { + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); + } + + @Test + @Transactional + @Rollback(true) + public void testAllSorted() throws Exception { + int count = 10; + launchJobs(count); + + Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); + assertEquals(count, jobs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testPortionSorted() throws Exception { + int count = 100; + launchJobs(count); + + int portion = 19; + Set jobs = dispatcherDao.findDispatchJobs(getHost(), (portion + 1) / 10); + assertEquals(portion, jobs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingDisabled() throws Exception { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + + int count = 10; + launchJobs(count); + + Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); + assertEquals(count, jobs.size()); + + List sortedJobs = new ArrayList(jobs); + Collections.sort(sortedJobs, Comparator.comparing(jobId -> jobManager.getJob(jobId).getName())); + + for (int i = 0; i < count; i++) { + assertEquals("pipe-default-testuser_job" + i, jobManager.getJob(sortedJobs.get(i)).getName()); } + } - @Test - @Transactional - @Rollback(true) - public void testFifoSchedulingDisabled() throws Exception { - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); - - int count = 10; - launchJobs(count); + @Test + @Transactional + @Rollback(true) + public void testGroup() throws Exception { + int count = 10; + launchJobs(count); - Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); - assertEquals(count, jobs.size()); + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_job0"); + assertNotNull(job); - List sortedJobs = new ArrayList(jobs); - Collections.sort(sortedJobs, - Comparator.comparing(jobId -> jobManager.getJob(jobId).getName())); - - for (int i = 0; i < count; i++) { - assertEquals("pipe-default-testuser_job" + i, - jobManager.getJob(sortedJobs.get(i)).getName()); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGroup() throws Exception { - int count = 10; - launchJobs(count); - - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_job0"); - assertNotNull(job); - - Set jobs = dispatcherDao.findDispatchJobs(getHost(), - groupManager.getGroupDetail(job)); - assertEquals(count, jobs.size()); - } + Set jobs = dispatcherDao.findDispatchJobs(getHost(), groupManager.getGroupDetail(job)); + assertEquals(count, jobs.size()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java index 
1ff849473..ef3719d9b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -68,503 +64,459 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DispatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - HostDao hostDao; - - @Resource - ProcDao procDao; - - @Resource - LayerDao layerDao; - - @Resource - JobDao jobDao; - - @Resource - AllocationDao allocationDao; - - @Resource - JobManager jobManager; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - JobLauncher jobLauncher; - - @Resource - BookingDao bookingDao; - - private static final String HOSTNAME="beta"; - - public DispatchHost getHost() { - return hostDao.findDispatchHost(HOSTNAME); - } - - public JobDetail getJob1() { - return jobManager.findJobDetail( - "pipe-dev.cue-testuser_shell_dispatch_test_v1"); - } - - public JobDetail getJob2() { - return jobManager.findJobDetail( - "pipe-dev.cue-testuser_shell_dispatch_test_v2"); - } - - @Before - public void launchJob() { - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - jobLauncher.launch( - new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFrameByHost() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - for (LayerDetail layer: layerDao.getLayerDetails(job)) { - assertTrue(layer.tags.contains("general")); - } - - assertTrue(jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, - host.id).contains("general")); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - assertNotNull(frame); - assertEquals(frame.name, "0001-pass_1"); - } - - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFrameByProc() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - // TODO: fix the fact you can book the same proc on multiple frames - // probably just need to make sure you can't update a proc's frame - // assignment unless the frame id is null. - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - assertNotNull(frame); - assertEquals("0001-pass_1", frame.name); - - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals("0001-pass_2", frame.name); - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals("0002-pass_1", frame.name); - dispatcher.dispatch(frame, proc); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProc() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - // TODO: fix the fact you can book the same proc on multiple frames - // probably just need to make sure you can't update a proc's frame - // assignment unless the frame id is null. 
- - List frames = - dispatcherDao.findNextDispatchFrames(job, host,10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals(frame.name,"0001-pass_2"); - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals(frame.name,"0002-pass_1"); - dispatcher.dispatch(frame, proc); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByHostAndJobLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - host.isLocalDispatch = true; - List frames = - dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class DispatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByHostAndLayerLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - LayerInterface layer = jobManager.getLayers(job).get(0); - host.isLocalDispatch = true; - - List frames = - dispatcherDao.findNextDispatchFrames(layer, host, 10); - assertEquals(10, frames.size()); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProcAndJobLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - host.isLocalDispatch = true; - List frames = - dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - proc.isLocalDispatch = true; - - frames = dispatcherDao.findNextDispatchFrames(job, proc, 10); - assertEquals(10, frames.size()); - } + @Resource + DispatcherDao dispatcherDao; - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProcAndLayerLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - LayerInterface layer = jobManager.getLayers(job).get(0); - host.isLocalDispatch = true; - - List frames = - dispatcherDao.findNextDispatchFrames(layer, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - proc.isLocalDispatch = true; - - frames = dispatcherDao.findNextDispatchFrames(layer, proc, 10); - assertEquals(10, frames.size()); - } + @Resource + HostDao hostDao; + @Resource + ProcDao procDao; - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobs() { - DispatchHost host = getHost(); + @Resource + LayerDao layerDao; - assertTrue(jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job WHERE str_state='PENDING'", Integer.class) > 0); + @Resource + JobDao jobDao; - Set jobs = dispatcherDao.findDispatchJobs(host, 10); - assertTrue(jobs.size() > 0); - } + @Resource + AllocationDao allocationDao; - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByGroup() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); + @Resource + JobManager jobManager; - assertNotNull(job); - assertNotNull(job.groupId); + @Resource + 
DispatchSupport dispatchSupport; - Set jobs = dispatcherDao.findDispatchJobs(host, - groupManager.getGroupDetail(job)); - assertTrue(jobs.size() > 0); - } + @Resource + HostManager hostManager; - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByShow() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - assertNotNull(job); - - Set jobs = dispatcherDao.findDispatchJobs(host, - adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() > 0); - } + @Resource + AdminManager adminManager; - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByLocal() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - assertNotNull(job); - - Set jobs = dispatcherDao.findLocalDispatchJobs(host); - assertEquals(0, jobs.size()); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setThreads(1); - lja.setMaxMemory(CueUtil.GB16); - lja.setMaxCoreUnits(200); - lja.setMaxGpuMemory(1); - bookingDao.insertLocalHostAssignment(host, job, lja); - - jobs = dispatcherDao.findLocalDispatchJobs(host); - assertTrue(jobs.size() > 0); - } + @Resource + GroupManager groupManager; - @Test - @Transactional - @Rollback(true) - public void testfindUnderProcedJob() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); + @Resource + Dispatcher dispatcher; - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 1000); + @Resource + JobLauncher jobLauncher; - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); + @Resource + BookingDao bookingDao; - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); + private static final String HOSTNAME = "beta"; - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); + public DispatchHost getHost() { + return hostDao.findDispatchHost(HOSTNAME); + } - VirtualProc proc = VirtualProc.build(host, frame, job1.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); + public JobDetail getJob1() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + } - boolean under = dispatcherDao.findUnderProcedJob(job1, proc); - assertTrue(under); - } + public JobDetail getJob2() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v2"); + } - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsTrue() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 100; - job2.priority = 200; - - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 0); - jobDao.updatePriority(job1, 100); - jobDao.updatePriority(job2, 200); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job2.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertTrue(isHigher); - } + @Before + public void launchJob() { + 
dispatcher.setTestMode(true); + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsFalse() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 20000; - job2.priority = 100; - - jobDao.updatePriority(job1, 20000); - jobDao.updatePriority(job2, 100); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job2.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertFalse(isHigher); - } + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(2).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux").build(); - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsMaxProcBound() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 100; - job2.priority = 200; - - jobDao.updateMaxCores(job2, 0); - jobDao.updatePriority(job1, 100); - jobDao.updatePriority(job2, 200); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job2.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertFalse(isHigher); - } + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } - @Test - @Transactional - @Rollback(true) - public void testFifoSchedulingEnabled() { - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); - } + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFrameByHost() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByShowMultiOs() { - DispatchHost host = getHost(); - // Set multiple Os and confirm jobs with Linux are still being found - final JobDetail job = getJob1(); - assertNotNull(job); - - // Host with different os - host.setOs("centos7,SomethingElse"); - Set jobs = dispatcherDao.findDispatchJobs(host, - adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() == 0); - - // Host with Linux Os (same as defined on spec) - 
host.setOs("centos7,Linux,rocky9"); - jobs = dispatcherDao.findDispatchJobs(host, - adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() > 0); + for (LayerDetail layer : layerDao.getLayerDetails(job)) { + assertTrue(layer.tags.contains("general")); } - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsAllShowsMultiOs() { - DispatchHost host = getHost(); - // Set multiple Os and confirm jobs with Linux are still being found - final JobDetail job = getJob1(); - assertNotNull(job); - - // Host with incompatible OS shouldn't find any job - host.setOs("centos7,SomethingElse"); - Set jobs = dispatcherDao.findDispatchJobs(host, 5); - assertTrue(jobs.size() == 0); - - // Host with Linux Os (same as defined on spec) should find jobs - host.setOs("centos7,Linux,rocky9"); - jobs = dispatcherDao.findDispatchJobs(host, 5); - assertTrue(jobs.size() > 0); - } + assertTrue(jdbcTemplate + .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id) + .contains("general")); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + assertNotNull(frame); + assertEquals(frame.name, "0001-pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFrameByProc() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + + // TODO: fix the fact you can book the same proc on multiple frames + // probably just need to make sure you can't update a proc's frame + // assignment unless the frame id is null. + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + assertNotNull(frame); + assertEquals("0001-pass_1", frame.name); + + VirtualProc proc = VirtualProc.build(host, frame, job.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + frame = dispatcherDao.findNextDispatchFrame(job, proc); + assertNotNull(frame); + assertEquals("0001-pass_2", frame.name); + dispatcher.dispatch(frame, proc); + + frame = dispatcherDao.findNextDispatchFrame(job, proc); + assertNotNull(frame); + assertEquals("0002-pass_1", frame.name); + dispatcher.dispatch(frame, proc); + } + + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFramesByProc() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + + // TODO: fix the fact you can book the same proc on multiple frames + // probably just need to make sure you can't update a proc's frame + // assignment unless the frame id is null. 
+
+ List<DispatchFrame> frames = dispatcherDao.findNextDispatchFrames(job, host, 10);
+ assertEquals(10, frames.size());
+
+ DispatchFrame frame = frames.get(0);
+
+ VirtualProc proc = VirtualProc.build(host, frame, job.os);
+ proc.coresReserved = 100;
+ dispatcher.dispatch(frame, proc);
+
+ frame = dispatcherDao.findNextDispatchFrame(job, proc);
+ assertNotNull(frame);
+ assertEquals(frame.name, "0001-pass_2");
+ dispatcher.dispatch(frame, proc);
+
+ frame = dispatcherDao.findNextDispatchFrame(job, proc);
+ assertNotNull(frame);
+ assertEquals(frame.name, "0002-pass_1");
+ dispatcher.dispatch(frame, proc);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindNextDispatchFramesByHostAndJobLocal() {
+ DispatchHost host = getHost();
+ JobDetail job = getJob1();
+ host.isLocalDispatch = true;
+ List<DispatchFrame> frames = dispatcherDao.findNextDispatchFrames(job, host, 10);
+ assertEquals(10, frames.size());
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindNextDispatchFramesByHostAndLayerLocal() {
+ DispatchHost host = getHost();
+ JobDetail job = getJob1();
+ LayerInterface layer = jobManager.getLayers(job).get(0);
+ host.isLocalDispatch = true;
+
+ List<DispatchFrame> frames = dispatcherDao.findNextDispatchFrames(layer, host, 10);
+ assertEquals(10, frames.size());
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindNextDispatchFramesByProcAndJobLocal() {
+ DispatchHost host = getHost();
+ JobDetail job = getJob1();
+ host.isLocalDispatch = true;
+ List<DispatchFrame> frames = dispatcherDao.findNextDispatchFrames(job, host, 10);
+ assertEquals(10, frames.size());
+
+ DispatchFrame frame = frames.get(0);
+ VirtualProc proc = VirtualProc.build(host, frame, job.os);
+ proc.coresReserved = 100;
+ proc.isLocalDispatch = true;
+
+ frames = dispatcherDao.findNextDispatchFrames(job, proc, 10);
+ assertEquals(10, frames.size());
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindNextDispatchFramesByProcAndLayerLocal() {
+ DispatchHost host = getHost();
+ JobDetail job = getJob1();
+ LayerInterface layer = jobManager.getLayers(job).get(0);
+ host.isLocalDispatch = true;
+
+ List<DispatchFrame> frames = dispatcherDao.findNextDispatchFrames(layer, host, 10);
+ assertEquals(10, frames.size());
+
+ DispatchFrame frame = frames.get(0);
+ VirtualProc proc = VirtualProc.build(host, frame, job.os);
+ proc.coresReserved = 100;
+ proc.isLocalDispatch = true;
+
+ frames = dispatcherDao.findNextDispatchFrames(layer, proc, 10);
+ assertEquals(10, frames.size());
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindDispatchJobs() {
+ DispatchHost host = getHost();
+
+ assertTrue(jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job WHERE str_state='PENDING'",
+ Integer.class) > 0);
+
+ Set<String> jobs = dispatcherDao.findDispatchJobs(host, 10);
+ assertTrue(jobs.size() > 0);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindDispatchJobsByGroup() {
+ DispatchHost host = getHost();
+ final JobDetail job = getJob1();
+
+ assertNotNull(job);
+ assertNotNull(job.groupId);
+
+ Set<String> jobs = dispatcherDao.findDispatchJobs(host, groupManager.getGroupDetail(job));
+ assertTrue(jobs.size() > 0);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindDispatchJobsByShow() {
+ DispatchHost host = getHost();
+ final JobDetail job = getJob1();
+ assertNotNull(job);
+
+ Set<String> jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5);
+ assertTrue(jobs.size() > 0);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testFindDispatchJobsByLocal() {
+ DispatchHost host = getHost(); + final JobDetail job = getJob1(); + assertNotNull(job); + + Set jobs = dispatcherDao.findLocalDispatchJobs(host); + assertEquals(0, jobs.size()); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setThreads(1); + lja.setMaxMemory(CueUtil.GB16); + lja.setMaxCoreUnits(200); + lja.setMaxGpuMemory(1); + bookingDao.insertLocalHostAssignment(host, job, lja); + + jobs = dispatcherDao.findLocalDispatchJobs(host); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testfindUnderProcedJob() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + + jobDao.updateMinCores(job1, 0); + jobDao.updateMinCores(job2, 1000); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job1.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean under = dispatcherDao.findUnderProcedJob(job1, proc); + assertTrue(under); + } + + @Test + @Transactional + @Rollback(true) + public void testHigherPriorityJobExistsTrue() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + job1.priority = 100; + job2.priority = 200; + + jobDao.updateMinCores(job1, 0); + jobDao.updateMinCores(job2, 0); + jobDao.updatePriority(job1, 100); + jobDao.updatePriority(job2, 200); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job2.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); + assertTrue(isHigher); + } + + @Test + @Transactional + @Rollback(true) + public void testHigherPriorityJobExistsFalse() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + job1.priority = 20000; + job2.priority = 100; + + jobDao.updatePriority(job1, 20000); + jobDao.updatePriority(job2, 100); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job2.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); + assertFalse(isHigher); + } + + @Test + @Transactional + @Rollback(true) + public void testHigherPriorityJobExistsMaxProcBound() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + job1.priority = 100; + job2.priority = 200; + + 
jobDao.updateMaxCores(job2, 0); + jobDao.updatePriority(job1, 100); + jobDao.updatePriority(job2, 200); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job2.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); + assertFalse(isHigher); + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingEnabled() { + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsByShowMultiOs() { + DispatchHost host = getHost(); + // Set multiple Os and confirm jobs with Linux are still being found + final JobDetail job = getJob1(); + assertNotNull(job); + + // Host with different os + host.setOs("centos7,SomethingElse"); + Set jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); + assertTrue(jobs.size() == 0); + + // Host with Linux Os (same as defined on spec) + host.setOs("centos7,Linux,rocky9"); + jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsAllShowsMultiOs() { + DispatchHost host = getHost(); + // Set multiple Os and confirm jobs with Linux are still being found + final JobDetail job = getJob1(); + assertNotNull(job); + + // Host with incompatible OS shouldn't find any job + host.setOs("centos7,SomethingElse"); + Set jobs = dispatcherDao.findDispatchJobs(host, 5); + assertTrue(jobs.size() == 0); + + // Host with Linux Os (same as defined on spec) should find jobs + host.setOs("centos7,Linux,rocky9"); + jobs = dispatcherDao.findDispatchJobs(host, 5); + assertTrue(jobs.size() > 0); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java index 82019e85e..98f1d994f 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -39,40 +35,39 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FacilityDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - FacilityDao facilityDao; + @Resource + FacilityDao facilityDao; - @Test - @Transactional - @Rollback(true) - public void testGetDetaultFacility() { - assertEquals(jdbcTemplate.queryForObject( - "SELECT pk_facility FROM facility WHERE b_default=true", - String.class),facilityDao.getDefaultFacility().getId()); - } + @Test + @Transactional + @Rollback(true) + public void testGetDetaultFacility() { + assertEquals(jdbcTemplate + .queryForObject("SELECT pk_facility FROM facility WHERE b_default=true", String.class), + facilityDao.getDefaultFacility().getId()); + } - @Test - @Transactional - @Rollback(true) - public void testGetFacility() { - String id = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; - assertEquals(id, facilityDao.getFacility(id).getId()); - assertEquals(id, facilityDao.getFacility("spi").getId()); - } + @Test + @Transactional + @Rollback(true) + public void testGetFacility() { + String id = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; + assertEquals(id, facilityDao.getFacility(id).getId()); + assertEquals(id, facilityDao.getFacility("spi").getId()); + } - @Test - @Transactional - @Rollback(true) - public void testFacilityExists() { - assertTrue(facilityDao.facilityExists("spi")); - assertFalse(facilityDao.facilityExists("rambo")); - } + @Test + @Transactional + @Rollback(true) + public void testFacilityExists() { + assertTrue(facilityDao.facilityExists("spi")); + assertFalse(facilityDao.facilityExists("rambo")); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java index d637ae75e..3c80c037a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -45,260 +41,235 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class FilterDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - AdminManager adminManager; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface createShow() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - return show; - } - - public ShowInterface getShow() { - return showDao.findShowDetail("testtest"); - } - - public FilterEntity buildFilter(ShowInterface show) { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = show.getId(); - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testGetActiveFilters() { - filterDao.getActiveFilters(createShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilters() { - filterDao.getFilters(createShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterEnabled() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - filterDao.updateSetFilterEnabled(f, false); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_enabled FROM filter WHERE pk_filter=?", - Boolean.class, f.getFilterId())); - filterDao.updateSetFilterEnabled(f, true); - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_enabled FROM filter WHERE pk_filter=?", - Boolean.class, f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterName() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - assertEquals(FILTER_NAME, jdbcTemplate.queryForObject( - "SELECT str_name FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - filterDao.updateSetFilterName(f, "TEST"); - assertEquals("TEST", jdbcTemplate.queryForObject( - "SELECT str_name FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterType() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - assertEquals(FilterType.MATCH_ANY.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - filterDao.updateSetFilterType(f, FilterType.MATCH_ALL); - assertEquals(FilterType.MATCH_ALL.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterOrder() { - - ShowInterface show = createShow(); - int currentFilters = jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM filter WHERE 
pk_show=?", - Integer.class, show.getShowId()); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - assertEquals(Integer.valueOf(currentFilters+1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(currentFilters+2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - - filterDao.updateSetFilterOrder(f2,1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - filterDao.deleteFilter(f); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - } - - @Test - @Transactional - @Rollback(true) - public void testReorderFilters() { - buildFilter(createShow()); - filterDao.reorderFilters(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testLowerFilterOrder() { - - ShowInterface show = createShow(); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - - /** - * These could fail if the test DB has other filters. - */ - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - - filterDao.lowerFilterOrder(f2,1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testRaiseFilterOrder() { - - ShowInterface show = createShow(); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - /** - * These could fail if the test DB has other filters. 
- */ - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - - filterDao.raiseFilterOrder(f1, 1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - - filterDao.getFilter(f); - filterDao.getFilter(f.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - - filterDao.findFilter(getShow(), FILTER_NAME); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class FilterDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + FilterDao filterDao; + + @Resource + ShowDao showDao; + + @Resource + AdminManager adminManager; + + private static String FILTER_NAME = "test_filter"; + + public ShowInterface createShow() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + return show; + } + + public ShowInterface getShow() { + return showDao.findShowDetail("testtest"); + } + + public FilterEntity buildFilter(ShowInterface show) { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = show.getId(); + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testGetActiveFilters() { + filterDao.getActiveFilters(createShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilters() { + filterDao.getFilters(createShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterEnabled() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + filterDao.updateSetFilterEnabled(f, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_enabled FROM filter WHERE pk_filter=?", + Boolean.class, f.getFilterId())); + filterDao.updateSetFilterEnabled(f, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_enabled FROM filter WHERE pk_filter=?", + Boolean.class, f.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterName() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + assertEquals(FILTER_NAME, jdbcTemplate.queryForObject( + "SELECT str_name FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); + filterDao.updateSetFilterName(f, "TEST"); + assertEquals("TEST", jdbcTemplate.queryForObject( + "SELECT str_name FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterType() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + assertEquals(FilterType.MATCH_ANY.toString(), jdbcTemplate.queryForObject( + "SELECT str_type FROM filter WHERE pk_filter=?", 
String.class, f.getFilterId())); + filterDao.updateSetFilterType(f, FilterType.MATCH_ALL); + assertEquals(FilterType.MATCH_ALL.toString(), jdbcTemplate.queryForObject( + "SELECT str_type FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterOrder() { + + ShowInterface show = createShow(); + int currentFilters = jdbcTemplate.queryForObject("SELECT COUNT(*) FROM filter WHERE pk_show=?", + Integer.class, show.getShowId()); + + FilterEntity f1 = buildFilter(show); + filterDao.insertFilter(f1); + + FilterEntity f2 = buildFilter(show); + f2.name = "TEST"; + filterDao.insertFilter(f2); + + assertEquals(Integer.valueOf(currentFilters + 1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(currentFilters + 2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + + filterDao.updateSetFilterOrder(f2, 1); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + filterDao.deleteFilter(f); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + } + + @Test + @Transactional + @Rollback(true) + public void testReorderFilters() { + buildFilter(createShow()); + filterDao.reorderFilters(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testLowerFilterOrder() { + + ShowInterface show = createShow(); + + FilterEntity f1 = buildFilter(show); + filterDao.insertFilter(f1); + + FilterEntity f2 = buildFilter(show); + f2.name = "TEST"; + filterDao.insertFilter(f2); + + /** + * These could fail if the test DB has other filters. + */ + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + + filterDao.lowerFilterOrder(f2, 1); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testRaiseFilterOrder() { + + ShowInterface show = createShow(); + + FilterEntity f1 = buildFilter(show); + filterDao.insertFilter(f1); + + FilterEntity f2 = buildFilter(show); + f2.name = "TEST"; + filterDao.insertFilter(f2); + + /** + * These could fail if the test DB has other filters. 
+ */ + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); -} + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + filterDao.raiseFilterOrder(f1, 1); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + + filterDao.getFilter(f); + filterDao.getFilter(f.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + + filterDao.findFilter(getShow(), FILTER_NAME); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java index 34070a3c5..d05d4d68b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -68,619 +64,579 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class FrameDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - FrameDao frameDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostDao hostDao; - - @Resource - ProcDao procDao; - - @Resource - HostManager hostManager; - - @Resource - DependManager dependManager; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameSearchFactory frameSearchFactory; - - private static final String HOST = "beta"; - - public DispatchHost createHost() { - return hostDao.findDispatchHost(HOST); - } - - @BeforeTransaction - public void create() { - - RenderHost host = RenderHost.newBuilder() - .setName(HOST) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(100) - .addAllTags(ImmutableList.of("mcore", "4core", "8g")) - .setState(HardwareState.UP) - .setFacility("spi") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostManager.createHost(host); - } - - @AfterTransaction - public void destroy() { - jdbcTemplate.update( - "DELETE FROM host WHERE str_name=?",HOST); - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameDetail() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameDetail frame = frameDao.getFrameDetail(f); - frame = frameDao.getFrameDetail(f.getFrameId()); - assertEquals("0001-pass_1", frame.name); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrameDetail() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals("0001-pass_1", frame.name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrame() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface frame = frameDao.getFrame(f.getFrameId()); - assertEquals("0001-pass_1", frame.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameByLayer() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface f2 = frameDao.findFrame((LayerInterface) f, 1); - - assertEquals(f.getFrameId(), f2.getFrameId()); - assertEquals(f.getLayerId(), f2.getLayerId()); - assertEquals(f.getJobId(), f2.getJobId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(f.getName(),"0001-pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrames() { - JobDetail job = launchJob(); - FrameSearchInterface r = 
frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .addFrames("0001-pass_1") - .build()); - assertEquals(1, frameDao.findFrames(r).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrameDetails() { - JobDetail job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .addFrames("0001-pass_1") - .build()); - assertEquals(1, frameDao.findFrameDetails(r).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testgetOrphanedFrames() { - assertEquals(0, frameDao.getOrphanedFrames().size()); - - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - - /* - * Update the first frame to the orphan state, which is a frame - * that is in the running state, has no corresponding proc entry - * and has not been updated in the last 5 min. - */ - jdbcTemplate.update( - "UPDATE frame SET str_state = 'RUNNING', " + - "ts_updated = current_timestamp - interval '301' second WHERE pk_frame = ?", - f.getFrameId()); - - assertEquals(1, frameDao.getOrphanedFrames().size()); - assertTrue(frameDao.isOrphan(f)); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameState() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - assertTrue(frameDao.updateFrameState(f, FrameState.RUNNING)); - - assertEquals(FrameState.RUNNING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, - f.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testFailUpdateFrameState() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - - /** Change the version so the update fails **/ - jdbcTemplate.update( - "UPDATE frame SET int_version = int_version + 1 WHERE pk_frame=?", - - f.getFrameId()); - - assertEquals(false, frameDao.updateFrameState(f, FrameState.RUNNING)); - } - - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameStarted() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - assertEquals(FrameState.WAITING, frame.state); - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - frameDao.updateFrameStarted(proc, fd); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameStopped() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - - assertEquals("0001-pass_1_preprocess",frame.getName()); - assertEquals(FrameState.WAITING,frame.state); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - 
procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - frameDao.updateFrameStarted(proc, fd); - - try { - Thread.sleep(1001); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - DispatchFrame fd2 = frameDao.getDispatchFrame(frame.getId()); - assertTrue(frameDao.updateFrameStopped(fd2, FrameState.DEAD, 1, 1000l)); - - assertEquals(FrameState.DEAD.toString(),jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameFixed() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - - assertEquals("0001-pass_1_preprocess",frame.getName()); - assertEquals(FrameState.WAITING,frame.state); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - frameDao.updateFrameStarted(proc, fd); - - try { - Thread.sleep(1001); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - frameDao.updateFrameState(frame, FrameState.WAITING); - frameDao.updateFrameFixed(proc, frame); - - assertEquals(FrameState.RUNNING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchFrame() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - assertEquals(dframe.id, frame.id); - } - - @Test - @Transactional - @Rollback(true) - public void testMarkFrameAsWaiting() { - JobDetail job = launchJob(); - - FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - - frameDao.markFrameAsWaiting(f); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testMarkFrameAsDepend() { - JobDetail job = launchJob(); - - FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_active FROM depend WHERE pk_layer_depend_er=?", - Boolean.class, f.getLayerId())); - - frameDao.markFrameAsWaiting(f); - 
assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - - /* - * Need to grab new version of frame - * object once the state has changed. - */ - f = frameDao.findFrameDetail(job, "0001-pass_1"); - - frameDao.markFrameAsDepend(f); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindLongestFrame() { - JobDetail job = launchJob(); - frameDao.findLongestFrame(job); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindShortestFrame() { - JobDetail job = launchJob(); - frameDao.findShortestFrame(job); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void findHighestMemoryFrame() { - JobDetail job = launchJob(); - frameDao.findHighestMemoryFrame(job); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void findLowestMemoryFrame() { - JobDetail job = launchJob(); - frameDao.findLowestMemoryFrame(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDependentFrames() { - JobDetail job = launchJob(); - FrameInterface frame_a = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface frame_b = frameDao.findFrame(job, "0002-pass_1"); - - dependManager.createDepend(new FrameOnFrame( - frame_a, frame_b)); - - assertEquals(1, frameDao.getDependentFrames( - dependManager.getWhatDependsOn(frame_b).get(0)).size(),1); - } - - @Test - @Transactional - @Rollback(true) - public void testGetResourceUsage() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - frameDao.getResourceUsage(dframe); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameCleared() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - /* - * Only frames without active procs can be cleared. 
- */ - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - assertFalse(frameDao.updateFrameCleared(dframe)); - - dispatchSupport.unbookProc(proc); - assertTrue(frameDao.updateFrameCleared(dframe)); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetStaleCheckpoints() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - assertEquals(0, frameDao.getStaleCheckpoints(300).size()); - jdbcTemplate.update("UPDATE frame SET str_state = ?, " + - "ts_stopped = current_timestamp - interval '400' second WHERE pk_frame = ?", - FrameState.CHECKPOINT.toString(), frame.getFrameId()); - assertEquals(1, frameDao.getStaleCheckpoints(300).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testSetCheckpointState() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - frameDao.updateFrameCheckpointState(frame, CheckpointState.ENABLED); - - String state = jdbcTemplate.queryForObject( - "SELECT str_checkpoint_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId()); - - assertEquals(CheckpointState.ENABLED.toString(), state); - - /** - * To set a checkpoint complete the frame state must be in the checkpoint state. - */ - frameDao.updateFrameState(frame, FrameState.CHECKPOINT); - jdbcTemplate.update( - "UPDATE frame SET ts_started=current_timestamp, ts_stopped=current_timestamp + INTERVAL '20' second WHERE pk_frame=?", - frame.getFrameId()); - - assertTrue(frameDao.updateFrameCheckpointState(frame, CheckpointState.COMPLETE)); - Map result = jdbcTemplate.queryForMap( - "SELECT int_checkpoint_count FROM frame WHERE pk_frame=?", - frame.getFrameId()); - - Integer checkPointCount = (Integer) result.get("int_checkpoint_count"); - assertEquals(1, checkPointCount.intValue()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsFrameComplete() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - frameDao.updateFrameState(frame, FrameState.EATEN); - assertTrue(frameDao.isFrameComplete(frame)); - - frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertTrue(frameDao.isFrameComplete(frame)); - - frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - frameDao.updateFrameState(frame, FrameState.WAITING); - assertFalse(frameDao.isFrameComplete(frame)); - } - - private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, - int red, int green, int blue) { - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(state) - .setText(text) - .setColor(FrameStateDisplayOverride.RGB.newBuilder() - .setRed(red) - .setGreen(green) - .setBlue(blue) - .build()) - .build(); - - return override; - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameOverride() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - // Create override - FrameStateDisplayOverride override = createFrameStateDisplayOverride(FrameState.SUCCEEDED, - "FINISHED", 200, 200, 123); - frameDao.setFrameStateDisplayOverride(frame.getFrameId(), override); - FrameStateDisplayOverrideSeq results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - 
assertEquals(1, results.getOverridesCount()); - assertEquals(override, results.getOverridesList().get(0)); - - // Try to update override - FrameStateDisplayOverride overrideUpdate = createFrameStateDisplayOverride(FrameState.SUCCEEDED, - "DONE", 100, 100, 100); - frameDao.updateFrameStateDisplayOverride(frame.getFrameId(), overrideUpdate); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - assertEquals(overrideUpdate, results.getOverridesList().get(0)); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class FrameDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + FrameDao frameDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostDao hostDao; + + @Resource + ProcDao procDao; + + @Resource + HostManager hostManager; + + @Resource + DependManager dependManager; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameSearchFactory frameSearchFactory; + + private static final String HOST = "beta"; + + public DispatchHost createHost() { + return hostDao.findDispatchHost(HOST); + } + + @BeforeTransaction + public void create() { + + RenderHost host = RenderHost.newBuilder().setName(HOST).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(1).setCoresPerProc(100).addAllTags(ImmutableList.of("mcore", "4core", "8g")) + .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host); + } + + @AfterTransaction + public void destroy() { + jdbcTemplate.update("DELETE FROM host WHERE str_name=?", HOST); + } + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameDetail() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + FrameDetail frame = frameDao.getFrameDetail(f); + frame = frameDao.getFrameDetail(f.getFrameId()); + assertEquals("0001-pass_1", frame.name); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrameDetail() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + assertEquals("0001-pass_1", frame.name); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrame() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + FrameInterface frame = frameDao.getFrame(f.getFrameId()); + assertEquals("0001-pass_1", frame.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameByLayer() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + FrameInterface f2 = frameDao.findFrame((LayerInterface) f, 1); + + assertEquals(f.getFrameId(), f2.getFrameId()); + assertEquals(f.getLayerId(), f2.getLayerId()); + assertEquals(f.getJobId(), f2.getJobId()); + } + + @Test + @Transactional + @Rollback(true) + public void 
testFindFrame() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + assertEquals(f.getName(), "0001-pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrames() { + JobDetail job = launchJob(); + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().addFrames("0001-pass_1").build()); + assertEquals(1, frameDao.findFrames(r).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrameDetails() { + JobDetail job = launchJob(); + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().addFrames("0001-pass_1").build()); + assertEquals(1, frameDao.findFrameDetails(r).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testgetOrphanedFrames() { + assertEquals(0, frameDao.getOrphanedFrames().size()); + + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + + /* + * Update the first frame to the orphan state, which is a frame that is in the running state, + * has no corresponding proc entry and has not been updated in the last 5 min. + */ + jdbcTemplate.update( + "UPDATE frame SET str_state = 'RUNNING', " + + "ts_updated = current_timestamp - interval '301' second WHERE pk_frame = ?", + f.getFrameId()); + + assertEquals(1, frameDao.getOrphanedFrames().size()); + assertTrue(frameDao.isOrphan(f)); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameState() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + assertTrue(frameDao.updateFrameState(f, FrameState.RUNNING)); + + assertEquals(FrameState.RUNNING.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, f.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testFailUpdateFrameState() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + + /** Change the version so the update fails **/ + jdbcTemplate.update("UPDATE frame SET int_version = int_version + 1 WHERE pk_frame=?", + + f.getFrameId()); + + assertEquals(false, frameDao.updateFrameState(f, FrameState.RUNNING)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameStarted() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + assertEquals(FrameState.WAITING, frame.state); + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + frameDao.updateFrameStarted(proc, fd); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameStopped() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); + + assertEquals("0001-pass_1_preprocess", frame.getName()); + assertEquals(FrameState.WAITING, frame.state); + + 
VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + frameDao.updateFrameStarted(proc, fd); + + try { + Thread.sleep(1001); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + DispatchFrame fd2 = frameDao.getDispatchFrame(frame.getId()); + assertTrue(frameDao.updateFrameStopped(fd2, FrameState.DEAD, 1, 1000l)); + + assertEquals(FrameState.DEAD.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameFixed() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); + + assertEquals("0001-pass_1_preprocess", frame.getName()); + assertEquals(FrameState.WAITING, frame.state); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + frameDao.updateFrameStarted(proc, fd); + + try { + Thread.sleep(1001); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + frameDao.updateFrameState(frame, FrameState.WAITING); + frameDao.updateFrameFixed(proc, frame); + + assertEquals(FrameState.RUNNING.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDispatchFrame() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); + assertEquals(dframe.id, frame.id); + } + + @Test + @Transactional + @Rollback(true) + public void testMarkFrameAsWaiting() { + JobDetail job = launchJob(); + + FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); + + frameDao.markFrameAsWaiting(f); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testMarkFrameAsDepend() { + JobDetail job = launchJob(); + + FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT 
int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_active FROM depend WHERE pk_layer_depend_er=?", + Boolean.class, f.getLayerId())); + + frameDao.markFrameAsWaiting(f); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); + + /* + * Need to grab new version of frame object once the state has changed. + */ + f = frameDao.findFrameDetail(job, "0001-pass_1"); + + frameDao.markFrameAsDepend(f); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void testFindLongestFrame() { + JobDetail job = launchJob(); + frameDao.findLongestFrame(job); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void testFindShortestFrame() { + JobDetail job = launchJob(); + frameDao.findShortestFrame(job); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void findHighestMemoryFrame() { + JobDetail job = launchJob(); + frameDao.findHighestMemoryFrame(job); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void findLowestMemoryFrame() { + JobDetail job = launchJob(); + frameDao.findLowestMemoryFrame(job); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDependentFrames() { + JobDetail job = launchJob(); + FrameInterface frame_a = frameDao.findFrame(job, "0001-pass_1"); + FrameInterface frame_b = frameDao.findFrame(job, "0002-pass_1"); + + dependManager.createDepend(new FrameOnFrame(frame_a, frame_b)); + + assertEquals(1, + frameDao.getDependentFrames(dependManager.getWhatDependsOn(frame_b).get(0)).size(), 1); + } + + @Test + @Transactional + @Rollback(true) + public void testGetResourceUsage() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); + frameDao.getResourceUsage(dframe); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameCleared() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + /* + * Only frames without active procs can be cleared. 
+ */ + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); + assertFalse(frameDao.updateFrameCleared(dframe)); + + dispatchSupport.unbookProc(proc); + assertTrue(frameDao.updateFrameCleared(dframe)); + + } + + @Test + @Transactional + @Rollback(true) + public void testGetStaleCheckpoints() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + assertEquals(0, frameDao.getStaleCheckpoints(300).size()); + jdbcTemplate.update( + "UPDATE frame SET str_state = ?, " + + "ts_stopped = current_timestamp - interval '400' second WHERE pk_frame = ?", + FrameState.CHECKPOINT.toString(), frame.getFrameId()); + assertEquals(1, frameDao.getStaleCheckpoints(300).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testSetCheckpointState() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + frameDao.updateFrameCheckpointState(frame, CheckpointState.ENABLED); + + String state = + jdbcTemplate.queryForObject("SELECT str_checkpoint_state FROM frame WHERE pk_frame=?", + String.class, frame.getFrameId()); + + assertEquals(CheckpointState.ENABLED.toString(), state); + + /** + * To set a checkpoint complete the frame state must be in the checkpoint state. + */ + frameDao.updateFrameState(frame, FrameState.CHECKPOINT); + jdbcTemplate.update( + "UPDATE frame SET ts_started=current_timestamp, ts_stopped=current_timestamp + INTERVAL '20' second WHERE pk_frame=?", + frame.getFrameId()); + + assertTrue(frameDao.updateFrameCheckpointState(frame, CheckpointState.COMPLETE)); + Map result = jdbcTemplate + .queryForMap("SELECT int_checkpoint_count FROM frame WHERE pk_frame=?", frame.getFrameId()); + + Integer checkPointCount = (Integer) result.get("int_checkpoint_count"); + assertEquals(1, checkPointCount.intValue()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsFrameComplete() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + frameDao.updateFrameState(frame, FrameState.EATEN); + assertTrue(frameDao.isFrameComplete(frame)); + + frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + assertTrue(frameDao.isFrameComplete(frame)); + + frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + frameDao.updateFrameState(frame, FrameState.WAITING); + assertFalse(frameDao.isFrameComplete(frame)); + } + + private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, + int red, int green, int blue) { + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder().setState(state) + .setText(text).setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(red) + .setGreen(green).setBlue(blue).build()) + .build(); + + return override; + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameOverride() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + // Create override + FrameStateDisplayOverride override = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "FINISHED", 200, 200, 123); + frameDao.setFrameStateDisplayOverride(frame.getFrameId(), override); + FrameStateDisplayOverrideSeq results = + frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + 
assertEquals(1, results.getOverridesCount()); + assertEquals(override, results.getOverridesList().get(0)); + + // Try to update override + FrameStateDisplayOverride overrideUpdate = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "DONE", 100, 100, 100); + frameDao.updateFrameStateDisplayOverride(frame.getFrameId(), overrideUpdate); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + assertEquals(overrideUpdate, results.getOverridesList().get(0)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java index 8700204d9..8db486696 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -51,408 +47,402 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class GroupDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - GroupDao groupDao; - - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Before - public void before() { - jobLauncher.testMode = true; +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class GroupDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + GroupDao groupDao; + + @Resource + ShowDao showDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Before + public void before() { + jobLauncher.testMode = true; + } + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + public GroupDetail createGroup() { + GroupDetail group = new GroupDetail(); + group.name = "Shit"; + group.parentId = groupDao.getRootGroupId(getShow()); + group.showId = getShow().getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(group, groupDao.getRootGroupDetail(getShow())); + return group; + } + + public GroupDetail createSubGroup(GroupDetail parent) { + GroupDetail group = new GroupDetail(); + group.name = "SubShit"; + group.parentId = parent.id; + group.showId = getShow().getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(group, groupDao.getGroup(parent.id)); + return group; + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroup() { + GroupDetail group = createGroup(); + GroupInterface g = groupDao.getGroup(group.id); + assertEquals(group.id, g.getGroupId()); + assertEquals(group.id, g.getId()); + assertEquals(group.name, g.getName()); + assertEquals(group.showId, g.getShowId()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroups() { + GroupDetail group = createGroup(); + List l = new ArrayList(); + l.add(group.id); + List g = groupDao.getGroups(l); + assertEquals(1, g.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetRootGroupId() { + groupDao.getRootGroupId(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertGroup() { + GroupDetail group = createGroup(); + assertFalse(group.isNew()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertGroupAlternateMethod() { + GroupDetail group = new GroupDetail(); + group.name = "Shit"; + group.parentId = groupDao.getRootGroupId(getShow()); + group.showId = getShow().getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(group); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteGroup() { + // Can't delete groups yet, will fail + GroupDetail group = createGroup(); + + 
assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM folder WHERE pk_folder=?", Integer.class, group.getId())); + + groupDao.deleteGroup(group); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM folder WHERE pk_folder=?", Integer.class, group.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateGroupParent() { + GroupDetail group = createGroup(); + GroupDetail subgroup = createSubGroup(group); + groupDao.updateGroupParent(subgroup, + groupDao.getGroupDetail(groupDao.getRootGroupId(getShow()))); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_level FROM folder_level WHERE pk_folder=?", Integer.class, subgroup.getId())); + + groupDao.updateGroupParent(subgroup, group); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_level FROM folder_level WHERE pk_folder=?", Integer.class, subgroup.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMaxCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_max_cores FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxCores(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_job_max_cores FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxCores(group, -1); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_max_cores FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMinCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_min_cores FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMinCores(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_job_min_cores FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMaxGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxGpus(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxGpus(group, -1); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMinGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMinGpus(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobPriority() { + GroupDetail group = 
createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_priority FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobPriority(group, 1000); + assertEquals(Integer.valueOf(1000), + jdbcTemplate.queryForObject("SELECT int_job_priority FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMinCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMinCores(group, 10); + assertEquals(Integer.valueOf(10), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMaxCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxCores(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxCores(group, -5); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMinGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMinGpus(group, 10); + assertEquals(Integer.valueOf(10), + jdbcTemplate.queryForObject("SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMaxGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxGpus(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxGpus(group, -5); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testIsManaged() { + GroupDetail group = createGroup(); + assertEquals(false, groupDao.isManaged(group)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateName() { + GroupDetail group = createGroup(); + groupDao.updateName(group, "NewName"); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDepartment() { + GroupDetail group = createGroup(); + groupDao.updateDepartment(group, departmentDao.findDepartment("Lighting")); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroupDetail() { + GroupDetail group = createGroup(); + GroupDetail group2 = groupDao.getGroupDetail(group.id); + } + + @Test + @Transactional + @Rollback(true) + public 
void testGetChildrenRecursive() { + boolean is_test2 = false; + boolean is_test3 = false; + + GroupDetail g1 = new GroupDetail(); + g1.name = "Test1"; + g1.showId = getShow().getId(); + g1.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); + + GroupDetail g2 = new GroupDetail(); + g2.name = "Test2"; + g2.showId = getShow().getId(); + g2.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); + + for (GroupInterface g : groupDao + .getChildrenRecursive(groupDao.getGroup("A0000000-0000-0000-0000-000000000000"))) { + if (g.getName().equals("Test1")) { + is_test2 = true; + } + if (g.getName().equals("Test2")) { + is_test3 = true; + } } - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + assertTrue(is_test2); + assertTrue(is_test3); + } + + @Test + @Transactional + @Rollback(true) + public void testGetChildren() { + boolean is_testuserA = false; + boolean is_testuserB = false; + + GroupDetail g1 = new GroupDetail(); + g1.name = "testuserA"; + g1.showId = getShow().getId(); + g1.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); + + GroupDetail g2 = new GroupDetail(); + g2.name = "testuserB"; + g2.showId = getShow().getId(); + g2.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); + + List groups = + groupDao.getChildren(groupDao.getGroup("A0000000-0000-0000-0000-000000000000")); + for (GroupInterface g : groups) { + if (g.getName().equals("testuserA")) { + is_testuserA = true; + } + if (g.getName().equals("testuserB")) { + is_testuserB = true; + } } + assertTrue(is_testuserA); + assertTrue(is_testuserB); + } - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } + @Test + @Transactional + @Rollback(true) + public void testIsOverMinCores() { - public GroupDetail createGroup() { - GroupDetail group = new GroupDetail(); - group.name = "Shit"; - group.parentId = groupDao.getRootGroupId(getShow()); - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group, groupDao.getRootGroupDetail(getShow())); - return group; - } - - public GroupDetail createSubGroup(GroupDetail parent) { - GroupDetail group = new GroupDetail(); - group.name = "SubShit"; - group.parentId = parent.id; - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group, groupDao.getGroup(parent.id)); - return group; - } + JobDetail job = launchJob(); + assertFalse(groupDao.isOverMinCores(job)); - @Test - @Transactional - @Rollback(true) - public void testGetGroup() { - GroupDetail group = createGroup(); - GroupInterface g = groupDao.getGroup(group.id); - assertEquals(group.id,g.getGroupId()); - assertEquals(group.id,g.getId()); - assertEquals(group.name, g.getName()); - assertEquals(group.showId, g.getShowId()); - } - - - @Test - @Transactional - @Rollback(true) - public void testGetGroups() { - GroupDetail group = createGroup(); - List l = new ArrayList(); - l.add(group.id); - List g = groupDao.getGroups(l); - assertEquals(1, g.size()); - } + String groupid = jdbcTemplate.queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", + 
String.class, job.getJobId()); - @Test - @Transactional - @Rollback(true) - public void testGetRootGroupId() { - groupDao.getRootGroupId(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertGroup() { - GroupDetail group = createGroup(); - assertFalse(group.isNew()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertGroupAlternateMethod() { - GroupDetail group = new GroupDetail(); - group.name = "Shit"; - group.parentId = groupDao.getRootGroupId(getShow()); - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteGroup() { - // Can't delete groups yet, will fail - GroupDetail group = createGroup(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_folder=?", - Integer.class, group.getId())); - - groupDao.deleteGroup(group); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_folder=?", - Integer.class, group.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateGroupParent() { - GroupDetail group = createGroup(); - GroupDetail subgroup = createSubGroup(group); - groupDao.updateGroupParent(subgroup, - groupDao.getGroupDetail( - groupDao.getRootGroupId(getShow()))); - - assertEquals(Integer.valueOf(1),jdbcTemplate.queryForObject( - "SELECT int_level FROM folder_level WHERE pk_folder=?", - Integer.class, subgroup.getId())); - - groupDao.updateGroupParent(subgroup, group); - - assertEquals(Integer.valueOf(2),jdbcTemplate.queryForObject( - "SELECT int_level FROM folder_level WHERE pk_folder=?", - Integer.class, subgroup.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMaxCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxCores(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxCores(group, -1); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMinCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_min_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMinCores(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_job_min_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } + // Now update some values so it returns true. 
+ jdbcTemplate.update( + "UPDATE folder_resource SET int_cores = int_min_cores + 1 WHERE pk_folder=?", groupid); - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMaxGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxGpus(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxGpus(group, -1); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMinGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMinGpus(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobPriority() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_priority FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobPriority(group, 1000); - assertEquals(Integer.valueOf(1000), jdbcTemplate.queryForObject( - "SELECT int_job_priority FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMinCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMinCores(group, 10); - assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMaxCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxCores(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxCores(group, -5); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMinGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMinGpus(group, 10); - assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( - "SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - 
@Test - @Transactional - @Rollback(true) - public void testUpdateMaxGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxGpus(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxGpus(group, -5); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testIsManaged() { - GroupDetail group = createGroup(); - assertEquals(false, groupDao.isManaged(group)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateName() { - GroupDetail group = createGroup(); - groupDao.updateName(group, "NewName"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDepartment() { - GroupDetail group = createGroup(); - groupDao.updateDepartment(group, departmentDao.findDepartment("Lighting")); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroupDetail() { - GroupDetail group = createGroup(); - GroupDetail group2 = groupDao.getGroupDetail(group.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetChildrenRecursive() { - boolean is_test2 = false; - boolean is_test3 = false; - - GroupDetail g1 = new GroupDetail(); - g1.name = "Test1"; - g1.showId = getShow().getId(); - g1.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); - - GroupDetail g2 = new GroupDetail(); - g2.name = "Test2"; - g2.showId = getShow().getId(); - g2.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); - - for ( GroupInterface g: groupDao.getChildrenRecursive(groupDao.getGroup("A0000000-0000-0000-0000-000000000000"))) { - if (g.getName().equals("Test1")) { - is_test2 = true; - } - if (g.getName().equals("Test2")) { - is_test3 = true; - } - } - assertTrue(is_test2); - assertTrue(is_test3); - } - - @Test - @Transactional - @Rollback(true) - public void testGetChildren() { - boolean is_testuserA = false; - boolean is_testuserB = false; - - GroupDetail g1 = new GroupDetail(); - g1.name = "testuserA"; - g1.showId = getShow().getId(); - g1.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); - - GroupDetail g2 = new GroupDetail(); - g2.name = "testuserB"; - g2.showId = getShow().getId(); - g2.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); - - List groups = groupDao.getChildren(groupDao.getGroup("A0000000-0000-0000-0000-000000000000")); - for (GroupInterface g : groups) { - if (g.getName().equals("testuserA")) { - is_testuserA = true; - } - if (g.getName().equals("testuserB")) { - is_testuserB = true; - } - } - assertTrue(is_testuserA); - assertTrue(is_testuserB); - } - - @Test - @Transactional - @Rollback(true) - public void testIsOverMinCores() { - - JobDetail job = launchJob(); - assertFalse(groupDao.isOverMinCores(job)); - - String groupid = jdbcTemplate.queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", - String.class, job.getJobId()); - - // Now update some values so it returns 
true. - jdbcTemplate.update("UPDATE folder_resource SET int_cores = int_min_cores + 1 WHERE pk_folder=?", - groupid); - - assertTrue(groupDao.isOverMinCores(job)); - } + assertTrue(groupDao.isOverMinCores(job)); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java index 566986ab6..30b8f1406 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -41,42 +37,39 @@ import static org.junit.Assert.assertEquals; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class HistoricalDaoTests extends - AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - private JobManager jobManager; - - @Resource - private JobLauncher jobLauncher; - - @Resource - private HistoricalDao historicalDao; - - @Test - @Transactional - @Rollback(true) - public void testGetFinishedJobs() { - historicalDao.getFinishedJobs(24); - } - - @Test - @Transactional - @Rollback(true) - public void testTransferJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail j = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.shutdownJob(j); - historicalDao.transferJob(j); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history WHERE pk_job=?", - Integer.class, j.getJobId())); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class HistoricalDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + private JobManager jobManager; + + @Resource + private JobLauncher jobLauncher; + + @Resource + private HistoricalDao historicalDao; + + @Test + @Transactional + @Rollback(true) + public void testGetFinishedJobs() { + historicalDao.getFinishedJobs(24); + } + + @Test + @Transactional + @Rollback(true) + public void testTransferJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail j = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.shutdownJob(j); + historicalDao.transferJob(j); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_history WHERE pk_job=?", Integer.class, j.getJobId())); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java index 09b3d3ccd..5dca888c3 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.sql.Timestamp; @@ -58,574 +54,493 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class HostDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - private static final String TEST_HOST = "beta"; - - @Resource - protected AllocationDao allocationDao; - - @Resource - protected HostDao hostDao; - - @Resource - protected HostManager hostManager; - - @Resource - protected FacilityDao facilityDao; - - public HostDaoTests() { } - - // Hardcoded value of dispatcher.memory.mem_reserved_system - // to avoid having to read opencue.properties on a test setting - private final long MEM_RESERVED_SYSTEM = 524288; - - public static RenderHost buildRenderHost(String name) { - RenderHost host = RenderHost.newBuilder() - .setName(name) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(15290520) - .setFreeSwap((int) CueUtil.MB512) - .setLoad(1) - .setNimbyEnabled(false) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(400) - .addAllTags(ImmutableList.of("linux", "64bit")) - .setState(HardwareState.UP) - .setFacility("spi") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - return host; - } - - @Test - public void testInit() { } - - @BeforeTransaction - public void clear() { - jdbcTemplate.update( - "DELETE FROM host WHERE str_name=?", TEST_HOST); - } - - @AfterTransaction - public void destroy() { - jdbcTemplate.update( - "DELETE FROM host WHERE str_name=?", TEST_HOST); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), jdbcTemplate.queryForObject( - "SELECT int_mem FROM host WHERE str_name=?", - Long.class, TEST_HOST)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN1() { - String TEST_HOST_NEW = "ice-ns1.yvr"; - String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), - hostManager.getDefaultAllocationDetail(), - true); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN2() { - String TEST_HOST_NEW = "compile21"; - String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, 
hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN3() { - String TEST_HOST_NEW = "hostname"; - String FQDN_HOST = TEST_HOST_NEW + ".fake.co.uk"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN4() { - String TEST_HOST_NEW = "10.0.1.18"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostIPv61() { - String TEST_HOST_NEW = "::1"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostIPv62() { - String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostIPv63() { - String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:192.168.100.180"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostAlternateOS() { - - RenderHost host = buildRenderHost(TEST_HOST).toBuilder() - .putAttributes("SP_OS", "spinux1") - .build(); - - hostDao.insertRenderHost(host, - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals("spinux1",jdbcTemplate.queryForObject( - "SELECT str_os FROM host_stat, host " + - "WHERE host.pk_host = host_stat.pk_host " + - "AND host.str_name=?",String.class, TEST_HOST), "spinux1"); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostDesktop() { - - RenderHost host = buildRenderHost(TEST_HOST); - hostDao.insertRenderHost(host, - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), jdbcTemplate.queryForObject( - "SELECT int_mem FROM host WHERE str_name=?", - Long.class, TEST_HOST)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateThreadMode() { - - RenderHost host = buildRenderHost(TEST_HOST); - host.toBuilder().setNimbyEnabled(true).build(); - hostDao.insertRenderHost(host, - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity d = hostDao.findHostDetail(TEST_HOST); - hostDao.updateThreadMode(d, ThreadMode.AUTO); - - assertEquals(Integer.valueOf(ThreadMode.AUTO_VALUE), 
jdbcTemplate.queryForObject( - "SELECT int_thread_mode FROM host WHERE pk_host=?", - Integer.class, d.id)); - - hostDao.updateThreadMode(d, ThreadMode.ALL); - - assertEquals(Integer.valueOf(ThreadMode.ALL_VALUE), jdbcTemplate.queryForObject( - "SELECT int_thread_mode FROM host WHERE pk_host=?", - Integer.class, d.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHostDetail() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - hostDao.getHostDetail(host); - hostDao.getHostDetail(host.getHostId()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsHostLocked() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(hostDao.isHostLocked(host),false); - - hostDao.updateHostLock(host, LockState.LOCKED, new Source("TEST")); - assertEquals(hostDao.isHostLocked(host),true); +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class HostDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + private static final String TEST_HOST = "beta"; + + @Resource + protected AllocationDao allocationDao; + + @Resource + protected HostDao hostDao; + + @Resource + protected HostManager hostManager; + + @Resource + protected FacilityDao facilityDao; + + public HostDaoTests() {} + + // Hardcoded value of dispatcher.memory.mem_reserved_system + // to avoid having to read opencue.properties on a test setting + private final long MEM_RESERVED_SYSTEM = 524288; + + public static RenderHost buildRenderHost(String name) { + RenderHost host = RenderHost.newBuilder().setName(name).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap((int) CueUtil.MB512).setLoad(1) + .setNimbyEnabled(false).setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB2).setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(400) + .addAllTags(ImmutableList.of("linux", "64bit")).setState(HardwareState.UP) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512) + .build(); + + return host; + } + + @Test + public void testInit() {} + + @BeforeTransaction + public void clear() { + jdbcTemplate.update("DELETE FROM host WHERE str_name=?", TEST_HOST); + } + + @AfterTransaction + public void destroy() { + jdbcTemplate.update("DELETE FROM host WHERE str_name=?", TEST_HOST); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHost() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), jdbcTemplate + .queryForObject("SELECT int_mem FROM host WHERE str_name=?", Long.class, TEST_HOST)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN1() { + String TEST_HOST_NEW = "ice-ns1.yvr"; + String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; + hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), hostManager.getDefaultAllocationDetail(), + true); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + + HostInterface host = hostDao.findHost(FQDN_HOST); + HostEntity hostDetail2 = hostDao.getHostDetail(host); + assertEquals(TEST_HOST_NEW, hostDetail2.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN2() { + String TEST_HOST_NEW = "compile21"; + String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; + hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + + HostInterface host = hostDao.findHost(FQDN_HOST); + HostEntity hostDetail2 = hostDao.getHostDetail(host); + assertEquals(TEST_HOST_NEW, hostDetail2.name); + + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN3() { + String TEST_HOST_NEW = "hostname"; + String FQDN_HOST = TEST_HOST_NEW + ".fake.co.uk"; + hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + + HostInterface host = hostDao.findHost(FQDN_HOST); + HostEntity hostDetail2 = hostDao.getHostDetail(host); + assertEquals(TEST_HOST_NEW, hostDetail2.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN4() { + String TEST_HOST_NEW = "10.0.1.18"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostIPv61() { + String TEST_HOST_NEW = "::1"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public 
void testInsertHostIPv62() { + String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostIPv63() { + String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:192.168.100.180"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostAlternateOS() { + + RenderHost host = + buildRenderHost(TEST_HOST).toBuilder().putAttributes("SP_OS", "spinux1").build(); + + hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); + + assertEquals("spinux1", + jdbcTemplate.queryForObject("SELECT str_os FROM host_stat, host " + + "WHERE host.pk_host = host_stat.pk_host " + "AND host.str_name=?", String.class, + TEST_HOST), + "spinux1"); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostDesktop() { + + RenderHost host = buildRenderHost(TEST_HOST); + hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); + + assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), jdbcTemplate + .queryForObject("SELECT int_mem FROM host WHERE str_name=?", Long.class, TEST_HOST)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateThreadMode() { + + RenderHost host = buildRenderHost(TEST_HOST); + host.toBuilder().setNimbyEnabled(true).build(); + hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); + + HostEntity d = hostDao.findHostDetail(TEST_HOST); + hostDao.updateThreadMode(d, ThreadMode.AUTO); + + assertEquals(Integer.valueOf(ThreadMode.AUTO_VALUE), jdbcTemplate + .queryForObject("SELECT int_thread_mode FROM host WHERE pk_host=?", Integer.class, d.id)); + + hostDao.updateThreadMode(d, ThreadMode.ALL); + + assertEquals(Integer.valueOf(ThreadMode.ALL_VALUE), jdbcTemplate + .queryForObject("SELECT int_thread_mode FROM host WHERE pk_host=?", Integer.class, d.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetHostDetail() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + hostDao.getHostDetail(host); + hostDao.getHostDetail(host.getHostId()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsHostLocked() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + assertEquals(hostDao.isHostLocked(host), false); + + hostDao.updateHostLock(host, LockState.LOCKED, new Source("TEST")); + assertEquals(hostDao.isHostLocked(host), true); + } + + @Test + @Transactional + @Rollback(true) + public void testIsHostUp() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + assertTrue(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); + + hostDao.updateHostState(hostDao.findHostDetail(TEST_HOST), HardwareState.DOWN); + assertFalse(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); + } + + @Test + @Transactional + @Rollback(true) + 
public void testHostExists() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + assertEquals(hostDao.hostExists(TEST_HOST), true); + assertEquals(hostDao.hostExists("frickjack"), false); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteHost() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + assertEquals(hostDao.hostExists(TEST_HOST), true); + hostDao.deleteHost(host); + assertEquals(hostDao.hostExists(TEST_HOST), false); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteDownHosts() { + for (int i = 0; i < 3; i++) { + String name = TEST_HOST + i; + hostDao.insertRenderHost(buildRenderHost(name), hostManager.getDefaultAllocationDetail(), + false); + if (i != 1) { + HostEntity host = hostDao.findHostDetail(name); + assertEquals(name, host.name); + hostDao.updateHostState(host, HardwareState.DOWN); + } } - @Test - @Transactional - @Rollback(true) - public void testIsHostUp() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); + hostDao.deleteDownHosts(); - assertTrue(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); - - hostDao.updateHostState(hostDao.findHostDetail(TEST_HOST), - HardwareState.DOWN); - assertFalse(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); - } - - @Test - @Transactional - @Rollback(true) - public void testHostExists() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(hostDao.hostExists(TEST_HOST),true); - assertEquals(hostDao.hostExists("frickjack"),false); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(hostDao.hostExists(TEST_HOST),true); - hostDao.deleteHost(host); - assertEquals(hostDao.hostExists(TEST_HOST),false); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteDownHosts() { - for (int i = 0; i < 3; i++) { - String name = TEST_HOST + i; - hostDao.insertRenderHost(buildRenderHost(name), - hostManager.getDefaultAllocationDetail(), - false); - if (i != 1) { - HostEntity host = hostDao.findHostDetail(name); - assertEquals(name, host.name); - hostDao.updateHostState(host, HardwareState.DOWN); - } - } - - hostDao.deleteDownHosts(); - - for (int i = 0; i < 3; i++) { - String name = TEST_HOST + i; - assertEquals(hostDao.hostExists(name), i == 1); - } - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostRebootWhenIdle() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_reboot_idle FROM host WHERE pk_host=?", - Boolean.class, host.getHostId())); - hostDao.updateHostRebootWhenIdle(host, true); - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_reboot_idle FROM host WHERE pk_host=?", - Boolean.class, host.getHostId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateHostStats() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); 
- hostDao.updateHostStats(dispatchHost, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - 1, - 1, - 100, - new Timestamp(1247526000 * 1000l), - "spinux1"); - - Map result = jdbcTemplate.queryForMap( - "SELECT * FROM host_stat WHERE pk_host=?", - dispatchHost.getHostId()); - - assertEquals(CueUtil.GB8, ((Long) - (result.get("int_mem_total"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) - (result.get("int_mem_free"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) - (result.get("int_swap_total"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) - (result.get("int_swap_free"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) - (result.get("int_mcp_total"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) - (result.get("int_mcp_free"))).longValue()); - assertEquals(100, ((Long) - (result.get("int_load"))).intValue()); - assertEquals(new Timestamp(1247526000 * 1000l), - (Timestamp) result.get("ts_booted")); - - } - - @Test - @Transactional - @Rollback(true) - public void updateHostResources() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - HostReport report = HostReport.newBuilder() - .setHost( - buildRenderHost(TEST_HOST).toBuilder() - .setCoresPerProc(1200) - .setNumProcs(2) - .setTotalMem((int) CueUtil.GB32) - ).build(); - hostDao.updateHostResources(dispatchHost, report); - - // Verify what the original values are - assertEquals(800, dispatchHost.cores); - assertEquals(800, dispatchHost.idleCores); - assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, - dispatchHost.idleMemory); - assertEquals(CueUtil.GB16- this.MEM_RESERVED_SYSTEM, - dispatchHost.memory); - - dispatchHost = hostDao.findDispatchHost(TEST_HOST); - - // Now verify they've changed. 
- assertEquals(2400, dispatchHost.cores); - assertEquals(2400, dispatchHost.idleCores); - assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, - dispatchHost.idleMemory); - assertEquals(CueUtil.GB32- this.MEM_RESERVED_SYSTEM, - dispatchHost.memory); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - - assertEquals(dispatchHost.name, TEST_HOST); - assertEquals(dispatchHost.allocationId, hostDetail.getAllocationId()); - assertEquals(dispatchHost.id, hostDetail.getHostId()); - assertEquals(dispatchHost.cores, hostDetail.cores); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetAllocation() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); - - hostDao.updateHostSetAllocation(hostDetail, - hostManager.getDefaultAllocationDetail()); - - hostDetail = hostDao.findHostDetail(TEST_HOST); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetManualTags() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - hostDao.tagHost(host,"frick", HostTagType.MANUAL); - hostDao.tagHost(host,"jack", HostTagType.MANUAL); - hostDao.recalcuateTags(host.id); - - String tag = jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id); - assertEquals("unassigned beta 64bit frick jack linux", tag); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetOS() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - hostDao.updateHostOs(host, "foo"); - String tag = jdbcTemplate.queryForObject( - "SELECT str_os FROM host_stat WHERE pk_host=?",String.class, host.id); - assertEquals("foo", tag); - } - - @Test - @Transactional - @Rollback(true) - public void testChangeTags() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - String tag = jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id); - assertEquals("unassigned beta 64bit linux", tag); - - hostDao.removeTag(host, "linux"); - hostDao.recalcuateTags(host.id); - - assertEquals("unassigned beta 64bit", jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id)); - - hostDao.tagHost(host, "32bit", HostTagType.MANUAL); - hostDao.recalcuateTags(host.id); - - assertEquals("unassigned beta 32bit 64bit", jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetStrandedCoreUnits() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - jdbcTemplate.update( - "UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", - CueUtil.GB, host.getHostId()); - - assertEquals(host.idleCores, hostDao.getStrandedCoreUnits(host)); - - jdbcTemplate.update( - "UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", - CueUtil.GB2, host.getHostId()); - - assertEquals(0, hostDao.getStrandedCoreUnits(host)); - - // Check to see if fractional cores is rounded to the lowest - // whole core properly. - jdbcTemplate.update( - "UPDATE host SET int_cores_idle=150, int_mem_idle = ? 
WHERE pk_host = ?", - CueUtil.GB, host.getHostId()); - - assertEquals(100, hostDao.getStrandedCoreUnits(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsPreferShow() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - assertFalse(hostDao.isPreferShow(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsNimby() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - assertFalse(hostDao.isNimbyHost(host)); + for (int i = 0; i < 3; i++) { + String name = TEST_HOST + i; + assertEquals(hostDao.hostExists(name), i == 1); } + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostRebootWhenIdle() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + assertFalse(jdbcTemplate.queryForObject("SELECT b_reboot_idle FROM host WHERE pk_host=?", + Boolean.class, host.getHostId())); + hostDao.updateHostRebootWhenIdle(host, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_reboot_idle FROM host WHERE pk_host=?", + Boolean.class, host.getHostId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateHostStats() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); + hostDao.updateHostStats(dispatchHost, CueUtil.GB8, CueUtil.GB8, CueUtil.GB8, CueUtil.GB8, + CueUtil.GB8, CueUtil.GB8, 1, 1, 100, new Timestamp(1247526000 * 1000l), "spinux1"); + + Map result = jdbcTemplate.queryForMap("SELECT * FROM host_stat WHERE pk_host=?", + dispatchHost.getHostId()); + + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mem_total"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mem_free"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_swap_total"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_swap_free"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mcp_total"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mcp_free"))).longValue()); + assertEquals(100, ((Long) (result.get("int_load"))).intValue()); + assertEquals(new Timestamp(1247526000 * 1000l), (Timestamp) result.get("ts_booted")); + + } + + @Test + @Transactional + @Rollback(true) + public void updateHostResources() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); + HostReport report = HostReport.newBuilder().setHost(buildRenderHost(TEST_HOST).toBuilder() + .setCoresPerProc(1200).setNumProcs(2).setTotalMem((int) CueUtil.GB32)).build(); + hostDao.updateHostResources(dispatchHost, report); + + // Verify what the original values are + assertEquals(800, dispatchHost.cores); + assertEquals(800, dispatchHost.idleCores); + assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, dispatchHost.idleMemory); + assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, dispatchHost.memory); + + dispatchHost = hostDao.findDispatchHost(TEST_HOST); + + // Now verify they've changed. 
+ assertEquals(2400, dispatchHost.cores); + assertEquals(2400, dispatchHost.idleCores); + assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, dispatchHost.idleMemory); + assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, dispatchHost.memory); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDispatchHost() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); + DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); + + assertEquals(dispatchHost.name, TEST_HOST); + assertEquals(dispatchHost.allocationId, hostDetail.getAllocationId()); + assertEquals(dispatchHost.id, hostDetail.getHostId()); + assertEquals(dispatchHost.cores, hostDetail.cores); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostSetAllocation() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), + false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); + + hostDao.updateHostSetAllocation(hostDetail, hostManager.getDefaultAllocationDetail()); + + hostDetail = hostDao.findHostDetail(TEST_HOST); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostSetManualTags() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + + hostDao.tagHost(host, "frick", HostTagType.MANUAL); + hostDao.tagHost(host, "jack", HostTagType.MANUAL); + hostDao.recalcuateTags(host.id); + + String tag = jdbcTemplate.queryForObject("SELECT str_tags FROM host WHERE pk_host=?", + String.class, host.id); + assertEquals("unassigned beta 64bit frick jack linux", tag); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostSetOS() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + hostDao.updateHostOs(host, "foo"); + String tag = jdbcTemplate.queryForObject("SELECT str_os FROM host_stat WHERE pk_host=?", + String.class, host.id); + assertEquals("foo", tag); + } + + @Test + @Transactional + @Rollback(true) + public void testChangeTags() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + + String tag = jdbcTemplate.queryForObject("SELECT str_tags FROM host WHERE pk_host=?", + String.class, host.id); + assertEquals("unassigned beta 64bit linux", tag); + + hostDao.removeTag(host, "linux"); + hostDao.recalcuateTags(host.id); + + assertEquals("unassigned beta 64bit", jdbcTemplate + .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id)); + + hostDao.tagHost(host, "32bit", HostTagType.MANUAL); + hostDao.recalcuateTags(host.id); + + assertEquals("unassigned beta 32bit 64bit", jdbcTemplate + .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetStrandedCoreUnits() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + + jdbcTemplate.update("UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", CueUtil.GB, + host.getHostId()); + + assertEquals(host.idleCores, hostDao.getStrandedCoreUnits(host)); + + jdbcTemplate.update("UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", CueUtil.GB2, + host.getHostId()); + + assertEquals(0, hostDao.getStrandedCoreUnits(host)); + + // Check to see if fractional cores is rounded to the lowest + // whole core properly. + jdbcTemplate.update("UPDATE host SET int_cores_idle=150, int_mem_idle = ? 
WHERE pk_host = ?", + CueUtil.GB, host.getHostId()); + + assertEquals(100, hostDao.getStrandedCoreUnits(host)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsPreferShow() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + assertFalse(hostDao.isPreferShow(host)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsNimby() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + assertFalse(hostDao.isNimbyHost(host)); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java index 3bfff75d6..9d8c9a89b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -65,671 +61,636 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class JobDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobDao jobDao; - - @Resource - PointDao pointDao; - - @Resource - ShowDao showDao; - - @Resource - TaskDao taskDao; - - @Resource - GroupDao groupDao; - - @Resource - FacilityDao facilityDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - JobLogUtil jobLogUtil; - - private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; - private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; - private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public JobDetail buildJobDetail() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return spec.getJobs().get(0).detail; - } - - public JobDetail insertJob() { - JobDetail job = this.buildJobDetail(); - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.showName = "pipe"; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.deptName = departmentDao.getDefaultDepartment().getName(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - job.facilityName = facilityDao.getDefaultFacility().getName(); - job.state = JobState.PENDING; - job.maxCoreUnits = 10000; - jobDao.insertJob(job, jobLogUtil); - return job; - } - - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchJob() { - JobDetail job = insertJob(); - DispatchJob djob = jobDao.getDispatchJob(job.id); - assertEquals(djob.id, job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobComplete() { - JobDetail job = insertJob(); - // returns true because there are no dispatchable frames - assertEquals(true,jobDao.isJobComplete(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJob() { - JobDetail job = this.buildJobDetail(); - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId= facilityDao.getDefaultFacility().getId(); - jobDao.insertJob(job, jobLogUtil); - assertNotNull(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindJob() { - JobDetail job = insertJob(); - JobInterface j1 = jobDao.findJob(job.name); - JobDetail j2 = jobDao.findJobDetail(job.name); - assertEquals(job.name, j1.getName()); - assertEquals(job.name, j2.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJob() { - JobDetail job = insertJob(); - jobDao.getJobDetail(job.id); - jobDao.getJob(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobDetail() { - JobDetail src = insertJob(); - JobDetail job = jobDao.getJobDetail(src.id); - assertEquals(job.id, src.id); - assertEquals(job.name, src.name); - assertEquals(job.showId, 
src.showId); - assertEquals(job.facilityId, src.facilityId); - assertEquals(job.groupId, src.groupId); - assertEquals(job.deptId, src.deptId); - assertEquals(job.state, src.state); - assertEquals(job.shot, src.shot); - assertEquals(job.user, src.user); - assertEquals(job.email, src.email); - assertEquals(job.uid, src.uid); - assertEquals(job.logDir, src.logDir); - assertEquals(job.isPaused, src.isPaused); - assertEquals(job.isAutoEat, src.isAutoEat); - assertEquals(job.totalFrames, src.totalFrames); - assertEquals(job.totalLayers, src.totalLayers); - assertEquals(job.startTime, src.startTime); - assertEquals(job.stopTime, src.stopTime); - assertEquals(job.maxRetries, src.maxRetries); - assertEquals(job.os, src.os); - assertEquals(job.facilityName, src.facilityName); - assertEquals(job.deptName, src.deptName); - assertEquals(job.showName, src.showName); - assertEquals(job.priority, src.priority); - assertEquals(job.minCoreUnits, src.minCoreUnits); - assertEquals(job.maxCoreUnits, src.maxCoreUnits); - assertEquals(job.isLocal, src.isLocal); - assertEquals(job.localHostName, src.localHostName); - assertEquals(job.localMaxCores, src.localMaxCores); - assertEquals(job.localMaxMemory, src.localMaxMemory); - assertEquals(job.localThreadNumber, src.localThreadNumber); - assertEquals(job.localMaxGpus, src.localMaxGpus); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobsByTask() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - jobDao.getJobs(t); - } - - @Test - @Transactional - @Rollback(true) - public void testJobExists() { - assertFalse(jobDao.exists(JOB_NAME)); - JobDetail job = insertJob(); - jdbcTemplate.update("UPDATE job SET str_state='PENDING' WHERE pk_job=?", - job.id); - assertTrue(jobDao.exists(JOB_NAME)); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteJob() { - jobDao.deleteJob(insertJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testActivateJob() { - jobDao.activateJob(insertJob(), JobState.PENDING); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobState() { - JobDetail job = insertJob(); - assertEquals(JobState.PENDING, job.state); - jobDao.updateState(job, JobState.FINISHED); - assertEquals(JobState.FINISHED.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobFinished() { - jobDao.updateJobFinished(insertJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobOverMinProc() { - JobDetail job = insertJob(); - assertFalse(jobDao.isOverMinCores(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testHasPendingFrames() { - assertFalse(jobDao.hasPendingFrames(insertJob())); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobOverMaxProc() { - JobDetail job = insertJob(); - assertFalse(jobDao.isOverMaxCores(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobAtMaxCores() { - JobDetail job = insertJob(); - assertFalse(jobDao.isAtMaxCores(job)); - - jdbcTemplate.update( - "UPDATE job_resource SET int_cores = int_max_cores WHERE pk_job=?", - job.getJobId()); - - assertTrue(jobDao.isAtMaxCores(job)); - - } - - @Test - @Transactional - @Rollback(true) - public void testIsOverMaxCores() { - JobDetail job = 
insertJob(); - jobDao.updateMaxCores(job, 500); - jdbcTemplate.update( - "UPDATE job_resource SET int_cores = 450 WHERE pk_job=?", - job.getJobId()); - - assertFalse(jobDao.isOverMaxCores(job)); - assertFalse(jobDao.isOverMaxCores(job, 50)); - assertTrue(jobDao.isOverMaxCores(job, 100)); - - jdbcTemplate.update( - "UPDATE job_resource SET int_max_cores = 200 WHERE pk_job=?", - job.getJobId()); - assertTrue(jobDao.isOverMaxCores(job)); - } - - @Test(expected=org.springframework.jdbc.UncategorizedSQLException.class) - @Transactional - @Rollback(true) - public void testMaxCoreTrigger() { - JobDetail job = insertJob(); - int maxCores = jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId()); - - jdbcTemplate.update( - "UPDATE job_resource SET int_cores = ? WHERE pk_job=?", - maxCores + 1, job.getJobId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPriority() { - JobDetail job = insertJob(); - jobDao.updatePriority(job, 199); - assertEquals(Integer.valueOf(199), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMinCores() { - JobDetail job = insertJob(); - jobDao.updateMinCores(job, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxCores() { - JobDetail job = insertJob(); - jobDao.updateMaxCores(job, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class JobDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobDao jobDao; + + @Resource + PointDao pointDao; + + @Resource + ShowDao showDao; + + @Resource + TaskDao taskDao; + + @Resource + GroupDao groupDao; + + @Resource + FacilityDao facilityDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + JobLogUtil jobLogUtil; + + private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; + private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; + private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; + + @Before + public void testMode() { + jobLauncher.testMode = true; + } + + public JobDetail buildJobDetail() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return spec.getJobs().get(0).detail; + } + + public JobDetail insertJob() { + JobDetail job = this.buildJobDetail(); + job.groupId = ROOT_FOLDER; + job.showId = ROOT_SHOW; + job.showName = "pipe"; + job.logDir = jobLogUtil.getJobLogPath(job); + job.deptId = departmentDao.getDefaultDepartment().getId(); + job.deptName = departmentDao.getDefaultDepartment().getName(); + job.facilityId = facilityDao.getDefaultFacility().getId(); + job.facilityName = facilityDao.getDefaultFacility().getName(); + job.state = JobState.PENDING; + job.maxCoreUnits = 10000; + jobDao.insertJob(job, jobLogUtil); + return job; + } + + public JobDetail launchJob() { + 
jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDispatchJob() { + JobDetail job = insertJob(); + DispatchJob djob = jobDao.getDispatchJob(job.id); + assertEquals(djob.id, job.id); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobComplete() { + JobDetail job = insertJob(); + // returns true because there are no dispatchable frames + assertEquals(true, jobDao.isJobComplete(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJob() { + JobDetail job = this.buildJobDetail(); + job.groupId = ROOT_FOLDER; + job.showId = ROOT_SHOW; + job.logDir = jobLogUtil.getJobLogPath(job); + job.deptId = departmentDao.getDefaultDepartment().getId(); + job.facilityId = facilityDao.getDefaultFacility().getId(); + jobDao.insertJob(job, jobLogUtil); + assertNotNull(job.id); + } + + @Test + @Transactional + @Rollback(true) + public void testFindJob() { + JobDetail job = insertJob(); + JobInterface j1 = jobDao.findJob(job.name); + JobDetail j2 = jobDao.findJobDetail(job.name); + assertEquals(job.name, j1.getName()); + assertEquals(job.name, j2.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJob() { + JobDetail job = insertJob(); + jobDao.getJobDetail(job.id); + jobDao.getJob(job.id); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobDetail() { + JobDetail src = insertJob(); + JobDetail job = jobDao.getJobDetail(src.id); + assertEquals(job.id, src.id); + assertEquals(job.name, src.name); + assertEquals(job.showId, src.showId); + assertEquals(job.facilityId, src.facilityId); + assertEquals(job.groupId, src.groupId); + assertEquals(job.deptId, src.deptId); + assertEquals(job.state, src.state); + assertEquals(job.shot, src.shot); + assertEquals(job.user, src.user); + assertEquals(job.email, src.email); + assertEquals(job.uid, src.uid); + assertEquals(job.logDir, src.logDir); + assertEquals(job.isPaused, src.isPaused); + assertEquals(job.isAutoEat, src.isAutoEat); + assertEquals(job.totalFrames, src.totalFrames); + assertEquals(job.totalLayers, src.totalLayers); + assertEquals(job.startTime, src.startTime); + assertEquals(job.stopTime, src.stopTime); + assertEquals(job.maxRetries, src.maxRetries); + assertEquals(job.os, src.os); + assertEquals(job.facilityName, src.facilityName); + assertEquals(job.deptName, src.deptName); + assertEquals(job.showName, src.showName); + assertEquals(job.priority, src.priority); + assertEquals(job.minCoreUnits, src.minCoreUnits); + assertEquals(job.maxCoreUnits, src.maxCoreUnits); + assertEquals(job.isLocal, src.isLocal); + assertEquals(job.localHostName, src.localHostName); + assertEquals(job.localMaxCores, src.localMaxCores); + assertEquals(job.localMaxMemory, src.localMaxMemory); + assertEquals(job.localThreadNumber, src.localThreadNumber); + assertEquals(job.localMaxGpus, src.localMaxGpus); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobsByTask() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + taskDao.insertTask(t); + jobDao.getJobs(t); + } + + @Test + @Transactional + @Rollback(true) + public void testJobExists() { + assertFalse(jobDao.exists(JOB_NAME)); + JobDetail job = insertJob(); + jdbcTemplate.update("UPDATE job SET str_state='PENDING' WHERE pk_job=?", 
job.id); + assertTrue(jobDao.exists(JOB_NAME)); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteJob() { + jobDao.deleteJob(insertJob()); + } + + @Test + @Transactional + @Rollback(true) + public void testActivateJob() { + jobDao.activateJob(insertJob(), JobState.PENDING); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobState() { + JobDetail job = insertJob(); + assertEquals(JobState.PENDING, job.state); + jobDao.updateState(job, JobState.FINISHED); + assertEquals(JobState.FINISHED.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobFinished() { + jobDao.updateJobFinished(insertJob()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobOverMinProc() { + JobDetail job = insertJob(); + assertFalse(jobDao.isOverMinCores(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testHasPendingFrames() { + assertFalse(jobDao.hasPendingFrames(insertJob())); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobOverMaxProc() { + JobDetail job = insertJob(); + assertFalse(jobDao.isOverMaxCores(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobAtMaxCores() { + JobDetail job = insertJob(); + assertFalse(jobDao.isAtMaxCores(job)); + + jdbcTemplate.update("UPDATE job_resource SET int_cores = int_max_cores WHERE pk_job=?", + job.getJobId()); + + assertTrue(jobDao.isAtMaxCores(job)); + + } + + @Test + @Transactional + @Rollback(true) + public void testIsOverMaxCores() { + JobDetail job = insertJob(); + jobDao.updateMaxCores(job, 500); + jdbcTemplate.update("UPDATE job_resource SET int_cores = 450 WHERE pk_job=?", job.getJobId()); + + assertFalse(jobDao.isOverMaxCores(job)); + assertFalse(jobDao.isOverMaxCores(job, 50)); + assertTrue(jobDao.isOverMaxCores(job, 100)); + + jdbcTemplate.update("UPDATE job_resource SET int_max_cores = 200 WHERE pk_job=?", + job.getJobId()); + assertTrue(jobDao.isOverMaxCores(job)); + } + + @Test(expected = org.springframework.jdbc.UncategorizedSQLException.class) + @Transactional + @Rollback(true) + public void testMaxCoreTrigger() { + JobDetail job = insertJob(); + int maxCores = jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId()); + + jdbcTemplate.update("UPDATE job_resource SET int_cores = ? 
WHERE pk_job=?", maxCores + 1, + job.getJobId()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobPriority() { + JobDetail job = insertJob(); + jobDao.updatePriority(job, 199); + assertEquals(Integer.valueOf(199), jdbcTemplate.queryForObject( + "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMinCores() { + JobDetail job = insertJob(); + jobDao.updateMinCores(job, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxCores() { + JobDetail job = insertJob(); + jobDao.updateMaxCores(job, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMinCoresByGroup() { + JobDetail job = insertJob(); + GroupInterface g = groupDao.getGroup(job.groupId); + jobDao.updateMinCores(g, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxCoresByGroup() { + JobDetail job = insertJob(); + GroupInterface g = groupDao.getGroup(job.groupId); + jobDao.updateMaxCores(g, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobPriorityByGroup() { + JobDetail job = insertJob(); + GroupInterface g = groupDao.getGroup(job.groupId); + jobDao.updatePriority(g, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxRss() { + long maxRss = 100000; + JobDetail job = insertJob(); + jobDao.updateMaxRSS(job, maxRss); + assertEquals(Long.valueOf(maxRss), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM job_mem WHERE pk_job=?", Long.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobPaused() { + JobDetail job = insertJob(); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_paused FROM job WHERE pk_job=?", Boolean.class, + job.getJobId())); + + jobDao.updatePaused(job, false); + + assertFalse(jdbcTemplate.queryForObject("SELECT b_paused FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobAutoEat() { + JobDetail job = insertJob(); + + assertFalse(jdbcTemplate.queryForObject("SELECT b_autoeat FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + + jobDao.updateAutoEat(job, true); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_autoeat FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxRetries() { + JobDetail job = insertJob(); + jobDao.updateMaxFrameRetries(job, 10); + assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( + "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, job.getJobId())); + } + + 
@Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testUpdateJobMaxRetriesTooLow() { + JobDetail job = insertJob(); + jobDao.updateMaxFrameRetries(job, -1); + } + + @Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testUpdateJobMaxRetriesTooHigh() { + JobDetail job = insertJob(); + jobDao.updateMaxFrameRetries(job, 100000); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameStateTotals() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + jobDao.getFrameStateTotals(spec.getJobs().get(0).detail); + } + + @Test + @Transactional + @Rollback(true) + public void testGetExecutionSummary() { + JobDetail job = launchJob(); + ExecutionSummary summary = jobDao.getExecutionSummary(job); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobEnvironment() { + JobDetail job = launchJob(); + Map map = jobDao.getEnvironment(job); + for (Map.Entry e : map.entrySet()) { + assertEquals("VNP_VCR_SESSION", e.getKey()); + assertEquals("9000", e.getValue()); + } + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobEnvironment() { + JobDetail job = launchJob(); + jobDao.insertEnvironment(job, "CHAMBERS", "123"); + Map map = jobDao.getEnvironment(job); + assertEquals(2, map.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobEnvironmentMap() { + JobDetail job = launchJob(); + Map map = new HashMap(); + map.put("CHAMBERS", "123"); + map.put("OVER9000", "123"); + + jobDao.insertEnvironment(job, map); + Map env = jobDao.getEnvironment(job); + assertEquals(3, env.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLastJob() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + + JobInterface job = spec.getJobs().get(0).detail; + jobDao.getFrameStateTotals(job); + jobManager.shutdownJob(job); + // this might fail + JobDetail oldJob = jobDao.findLastJob(job.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobLogPath() { + JobDetail job = launchJob(); + String newLogDir = "/path/to/nowhere"; + jobDao.updateLogPath(job, newLogDir); + assertEquals(newLogDir, jdbcTemplate + .queryForObject("SELECT str_log_dir FROM job WHERE pk_job=?", String.class, job.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobParent() { + JobDetail job = launchJob(); + + // Make a new test group. 
+ GroupDetail root = groupDao.getRootGroupDetail(job); + + GroupDetail testGroup = new GroupDetail(); + testGroup.name = "testGroup"; + testGroup.deptId = departmentDao.getDefaultDepartment().getId(); + testGroup.showId = root.getShowId(); + + groupDao.insertGroup(testGroup, root); + + jdbcTemplate.update( + "UPDATE folder SET int_job_max_cores=-1, int_job_min_cores=-1, int_job_priority=-1 WHERE pk_folder=?", + testGroup.getId()); + + GroupDetail group = groupDao.getGroupDetail(testGroup.getId()); + jobDao.updateParent(job, group); + + assertEquals(-1, group.jobMaxCores); + assertEquals(-1, group.jobMinCores); + assertEquals(-1, group.jobPriority); + + assertEquals(group.getGroupId(), jdbcTemplate + .queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", String.class, job.id)); + + assertEquals(group.getDepartmentId(), jdbcTemplate + .queryForObject("SELECT pk_dept FROM job WHERE pk_job=?", String.class, job.id)); + + group.jobMaxCores = 100; + group.jobMinCores = 100; + group.jobPriority = 100; + + jobDao.updateParent(job, group); + + assertEquals(Integer.valueOf(group.jobMaxCores), jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.id)); + + assertEquals(Integer.valueOf(group.jobMinCores), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.id)); + + assertEquals(Integer.valueOf(group.jobPriority), jdbcTemplate.queryForObject( + "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testCueHasPendingJobs() { + jobDao.cueHasPendingJobs(new FacilityEntity("0")); + + } + + @Test + @Transactional + @Rollback(true) + public void mapPostJob() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); + jobLauncher.launch(spec); + + final String pk_job = spec.getJobs().get(0).detail.id; + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM job_post WHERE pk_job=?", Integer.class, pk_job)); + } + + @Test + @Transactional + @Rollback(true) + public void activatePostJob() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); + jobLauncher.launch(spec); + + jobDao.activatePostJob(spec.getJobs().get(0).detail); + + assertEquals(JobState.PENDING.toString(), + jdbcTemplate.queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, + spec.getJobs().get(0).getPostJob().detail.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateUsage() { + + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + + JobInterface job = jobDao.findJob(spec.getJobs().get(0).detail.name); + + /** 60 seconds of 100 core units **/ + ResourceUsage usage = new ResourceUsage(60, 33, 0); + + assertTrue(usage.getClockTimeSeconds() > 0); + assertTrue(usage.getCoreTimeSeconds() > 0); + + /** + * Successful frame + */ + jobDao.updateUsage(job, usage, 0); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( + "SELECT int_clock_time_success FROM job_usage WHERE pk_job=?", Long.class, job.getId())); - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMinCoresByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updateMinCores(g, 100); - assertEquals(Integer.valueOf(100), 
jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxCoresByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updateMaxCores(g, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPriorityByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updatePriority(g, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxRss() { - long maxRss = 100000; - JobDetail job = insertJob(); - jobDao.updateMaxRSS(job, maxRss); - assertEquals(Long.valueOf(maxRss), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM job_mem WHERE pk_job=?", - Long.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPaused() { - JobDetail job = insertJob(); - - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_paused FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - - jobDao.updatePaused(job, false); - - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_paused FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobAutoEat() { - JobDetail job = insertJob(); - - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_autoeat FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - - jobDao.updateAutoEat(job, true); - - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_autoeat FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetries() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job,10); - assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( - "SELECT int_max_retries FROM job WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test(expected=IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetriesTooLow() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job,-1); - } - - @Test(expected=IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetriesTooHigh() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job,100000); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameStateTotals() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - jobDao.getFrameStateTotals(spec.getJobs().get(0).detail); - } - - @Test - @Transactional - @Rollback(true) - public void testGetExecutionSummary() { - JobDetail job = launchJob(); - ExecutionSummary summary = jobDao.getExecutionSummary(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobEnvironment() { - JobDetail job = launchJob(); - Map map = jobDao.getEnvironment(job); - for (Map.Entry e : map.entrySet()) { - assertEquals("VNP_VCR_SESSION", e.getKey()); - assertEquals( "9000", e.getValue()); - } - } - - @Test - @Transactional - @Rollback(true) - public void 
testInsertJobEnvironment() { - JobDetail job = launchJob(); - jobDao.insertEnvironment(job, "CHAMBERS","123"); - Map map = jobDao.getEnvironment(job); - assertEquals(2,map.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobEnvironmentMap() { - JobDetail job = launchJob(); - Map map = new HashMap(); - map.put("CHAMBERS","123"); - map.put("OVER9000","123"); - - jobDao.insertEnvironment(job, map); - Map env = jobDao.getEnvironment(job); - assertEquals(3,env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLastJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - JobInterface job = spec.getJobs().get(0).detail; - jobDao.getFrameStateTotals(job); - jobManager.shutdownJob(job); - // this might fail - JobDetail oldJob = jobDao.findLastJob(job.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobLogPath() { - JobDetail job = launchJob(); - String newLogDir = "/path/to/nowhere"; - jobDao.updateLogPath(job,newLogDir); - assertEquals(newLogDir,jdbcTemplate.queryForObject( - "SELECT str_log_dir FROM job WHERE pk_job=?",String.class, job.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobParent() { - JobDetail job = launchJob(); - - // Make a new test group. - GroupDetail root = groupDao.getRootGroupDetail(job); - - GroupDetail testGroup = new GroupDetail(); - testGroup.name = "testGroup"; - testGroup.deptId = departmentDao.getDefaultDepartment().getId(); - testGroup.showId = root.getShowId(); - - groupDao.insertGroup(testGroup, root); - - jdbcTemplate.update( - "UPDATE folder SET int_job_max_cores=-1, int_job_min_cores=-1, int_job_priority=-1 WHERE pk_folder=?", - testGroup.getId()); - - GroupDetail group = groupDao.getGroupDetail(testGroup.getId()); - jobDao.updateParent(job, group); - - assertEquals(-1,group.jobMaxCores); - assertEquals(-1,group.jobMinCores); - assertEquals(-1,group.jobPriority); - - assertEquals(group.getGroupId(),jdbcTemplate.queryForObject( - "SELECT pk_folder FROM job WHERE pk_job=?",String.class, job.id)); - - assertEquals(group.getDepartmentId(),jdbcTemplate.queryForObject( - "SELECT pk_dept FROM job WHERE pk_job=?",String.class, job.id)); - - group.jobMaxCores = 100; - group.jobMinCores = 100; - group.jobPriority = 100; - - jobDao.updateParent(job, group); - - assertEquals(Integer.valueOf(group.jobMaxCores) ,jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.id)); - - assertEquals(Integer.valueOf(group.jobMinCores) ,jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.id)); - - assertEquals(Integer.valueOf(group.jobPriority) ,jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", - Integer.class, job.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testCueHasPendingJobs() { - jobDao.cueHasPendingJobs(new FacilityEntity("0")); - - } - - @Test - @Transactional - @Rollback(true) - public void mapPostJob() { - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - final String pk_job = spec.getJobs().get(0).detail.id; - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_post WHERE pk_job=?", - Integer.class, pk_job)); - } - - @Test - @Transactional - @Rollback(true) - public void 
activatePostJob() { - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - jobDao.activatePostJob(spec.getJobs().get(0).detail); - - assertEquals(JobState.PENDING.toString(),jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", String.class, - spec.getJobs().get(0).getPostJob().detail.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateUsage() { - - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - JobInterface job = jobDao.findJob(spec.getJobs().get(0).detail.name); - - /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33, 0); - - assertTrue(usage.getClockTimeSeconds() > 0); - assertTrue(usage.getCoreTimeSeconds() > 0); - - /** - * Successful frame - */ - jobDao.updateUsage(job, usage, 0); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM job_usage WHERE pk_job=?", - Integer.class, job.getId())); - - /** - * Failed frame - */ - jobDao.updateUsage(job, usage, 1); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM job_usage WHERE pk_job=?", - Integer.class, job.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testOverrideMaxCoresAndGpus() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/override_max_cores_gpus.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_test"); - assertEquals(job.maxCoreUnits, 42000); - assertEquals(job.maxGpuUnits, 42); - } + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( + "SELECT int_core_time_success FROM job_usage WHERE pk_job=?", Long.class, job.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_frame_success_count FROM job_usage WHERE pk_job=?", + Integer.class, job.getId())); + + /** + * Failed frame + */ + jobDao.updateUsage(job, usage, 1); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( + "SELECT int_clock_time_fail FROM job_usage WHERE pk_job=?", Long.class, job.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( + "SELECT int_core_time_fail FROM job_usage WHERE pk_job=?", Long.class, job.getId())); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM job_usage WHERE pk_job=?", Integer.class, job.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testOverrideMaxCoresAndGpus() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/override_max_cores_gpus.xml")); + JobDetail job = 
jobManager.findJobDetail("pipe-dev.cue-testuser_test"); + assertEquals(job.maxCoreUnits, 42000); + assertEquals(job.maxGpuUnits, 42); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java index 6354595ec..32361cb26 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -71,693 +67,666 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class LayerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - JobDao jobDao; - - @Resource - LayerDao layerDao; - - @Resource - LimitDao limitDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - DepartmentDao departmentDao; - - @Resource - FacilityDao facilityDao; - - @Resource - JobLogUtil jobLogUtil; - - private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; - private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; - private static String LAYER_NAME = "pass_1"; - private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; - private static String LIMIT_NAME = "test-limit"; - private static String LIMIT_TEST_A = "testlimita"; - private static String LIMIT_TEST_B = "testlimitb"; - private static String LIMIT_TEST_C = "testlimitc"; - private static int LIMIT_MAX_VALUE = 32; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public LayerDetail getLayer() { - List layers = getLayers(); - return layers.get(layers.size()-1); - } - - public List getLayers() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = spec.getJobs().get(0).detail; - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - jobDao.insertJob(job, jobLogUtil); 
- - List result = new ArrayList<>(); - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - limitDao.createLimit(LIMIT_TEST_A, 1); - limitDao.createLimit(LIMIT_TEST_B, 2); - limitDao.createLimit(LIMIT_TEST_C, 3); - - for (BuildableLayer buildableLayer: spec.getJobs().get(0).getBuildableLayers()) { - LayerDetail layer = buildableLayer.layerDetail; - FrameSet frameSet = new FrameSet(layer.range); - int num_frames = frameSet.size(); - int chunk_size = layer.chunkSize; - - layer.jobId = job.id; - layer.showId = ROOT_SHOW; - layer.totalFrameCount = num_frames / chunk_size; - if (num_frames % chunk_size > 0) { layer.totalFrameCount++; } - - layerDao.insertLayerDetail(layer); - layerDao.insertLayerEnvironment(layer, buildableLayer.env); - layerDao.addLimit(layer, limitId); - result.add(layer); - } - - return result; - } - - public JobDetail getJob() { - return jobDao.findJobDetail(JOB_NAME); - } - - public String getTestLimitId(String name) { - return limitDao.findLimit(name).getLimitId(); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerComplete() { - layerDao.isLayerComplete(getLayer()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerDispatchable() { - layerDao.isLayerDispatchable(getLayer()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerDetail() { - LayerDetail layer = getLayer(); - assertEquals(LAYER_NAME, layer.name); - assertEquals(layer.chunkSize, 1); - assertEquals(layer.dispatchOrder,2); - assertNotNull(layer.id); - assertNotNull(layer.jobId); - assertEquals(layer.showId,ROOT_SHOW); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerDetail() { - LayerDetail layer = getLayer(); - assertEquals(LAYER_NAME, layer.name); - assertEquals(layer.chunkSize, 1); - assertEquals(layer.dispatchOrder,2); - assertNotNull(layer.id); - assertNotNull(layer.jobId); - assertEquals(layer.showId,ROOT_SHOW); - - LayerDetail l2 = layerDao.getLayerDetail(layer); - LayerDetail l3 = layerDao.getLayerDetail(layer.id); - assertEquals(layer, l2); - assertEquals(layer, l3); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerDetails() { - List wantLayers = getLayers(); - List gotLayers = layerDao.getLayerDetails(getJob()); - assertThat(gotLayers, containsInAnyOrder(wantLayers.toArray())); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayerDetail() { - LayerDetail layer = getLayer(); - layerDao.findLayer(getJob(), "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayer() { - LayerDetail layer = getLayer(); - layerDao.getLayer(layer.id); - layerDao.getLayerDetail(layer); - layerDao.getLayerDetail(layer.id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayer() { - LayerDetail layer = getLayer(); - layerDao.findLayer(getJob(), "pass_1"); - layerDao.findLayerDetail(getJob(), "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMinCores() { - LayerDetail layer = getLayer(); - layerDao.updateLayerMinCores(layer, 200); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l2.minimumCores,200); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerThreadable() { - LayerDetail layer = getLayer(); - layerDao.updateThreadable(layer, false); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_threadable FROM layer WHERE pk_layer=?", - Boolean.class, layer.getLayerId())); - } - - - @Test - @Transactional - 
@Rollback(true) - public void testUpdateLayerMinMemory() { - LayerDetail layer = getLayer(); - - /* - * Check to ensure going below Dispatcher.MEM_RESERVED_MIN is - * not allowed. - */ - layerDao.updateLayerMinMemory(layer, 8096); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - // Hardcoded value of dispatcher.memory.mem_reserved_min - // to avoid having to read opencue.properties on a test setting - assertEquals(l2.minimumMemory, 262144); - - /* - * Check regular operation. - */ - layerDao.updateLayerMinMemory(layer, CueUtil.GB); - LayerDetail l3 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l3.minimumMemory, CueUtil.GB); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerTags() { - LayerDetail layer = getLayer(); - - HashSet tags = new HashSet(); - tags.add("frickjack"); - tags.add("pancake"); - - layerDao.updateLayerTags(layer, tags); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(StringUtils.join(l2.tags," | "), "frickjack | pancake"); - - tags.clear(); - tags.add("frickjack"); - - layerDao.updateLayerTags(layer, tags); - l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(StringUtils.join(l2.tags," | "), "frickjack"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameStateTotals() { - LayerDetail layer = getLayer(); - layerDao.getFrameStateTotals(layer); - jobDao.getFrameStateTotals(layer); - } - - @Test - @Transactional - @Rollback(true) - public void testGetExecutionSummary() { - LayerDetail layer = getLayer(); - layerDao.getExecutionSummary(layer); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerEnvironment() { - LayerDetail layer = getLayer(); - Map map = layerDao.getLayerEnvironment(layer); - for (Map.Entry e : map.entrySet()) { - - } - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerEnvironment() { - LayerDetail layer = getLayer(); - layerDao.insertLayerEnvironment(layer, "CHAMBERS","123"); - Map env = layerDao.getLayerEnvironment(layer); - assertEquals(2,env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerEnvironmentMap() { - LayerDetail layer = getLayer(); - Map map = new HashMap(); - map.put("CHAMBERS","123"); - map.put("OVER9000","123"); - - layerDao.insertLayerEnvironment(layer, map); - Map env = layerDao.getLayerEnvironment(layer); - assertEquals(3,env.size()); - } - - - @Test - @Transactional - @Rollback(true) - public void testFindPastSameNameMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastTimeStampMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1_2011_05_03_16_03"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastNewVersionMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2"); - long maxRss = layerDao.findPastMaxRSS(lastJob, 
"pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastNewVersionTimeStampMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2_2011_05_03_16_03"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindPastNewVersionFailMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_vfail_v2"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMaxRSS() { - LayerDetail layer = getLayer(); - - layerDao.updateLayerMaxRSS(layer, 1000, true); - assertEquals(Long.valueOf(1000), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getId())); - - layerDao.updateLayerMaxRSS(layer, 999, true); - assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getId())); - - layerDao.updateLayerMaxRSS(layer, 900, false); - assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateTags() { - String tag = "dillweed"; - LayerDetail layer = getLayer(); - layerDao.updateTags(layer, tag, LayerType.RENDER); - assertEquals(tag,jdbcTemplate.queryForObject( - "SELECT str_tags FROM layer WHERE pk_layer=?", String.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinMemory() { - long mem = CueUtil.GB; - LayerDetail layer = getLayer(); - layerDao.updateMinMemory(layer, mem, LayerType.RENDER); - assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( - "SELECT int_mem_min FROM layer WHERE pk_layer=?", - Long.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinGpuMemory() { - long mem = CueUtil.GB; - LayerDetail layer = getLayer(); - layerDao.updateMinGpuMemory(layer, mem, LayerType.RENDER); - assertEquals(Long.valueOf(mem),jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_min FROM layer WHERE pk_layer=?", - Long.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinCores() { - int cores = CueUtil.ONE_CORE * 2; - LayerDetail layer = getLayer(); - layerDao.updateMinCores(layer, cores, LayerType.RENDER); - assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( - "SELECT int_cores_min FROM layer WHERE pk_layer=?", - Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMaxCores() { - int cores = CueUtil.ONE_CORE * 2; - LayerDetail layer = getLayer(); - layerDao.updateLayerMaxCores(layer, cores); - assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM layer WHERE pk_layer=?", - Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void isOptimizable() { - LayerDetail layer = getLayer(); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * The succeeded count is 
good but the frames are too long - * Assert False - */ - jdbcTemplate.update("UPDATE layer_stat SET int_succeeded_count = 5 WHERE pk_layer=?", - layer.getLayerId()); - - jdbcTemplate.update( - "UPDATE layer_usage SET int_core_time_success = 3600 * 6 " + - "WHERE pk_layer=?", layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Set the frame times lower, so now we meet the criteria - * Assert True - */ - jdbcTemplate.update( - "UPDATE layer_usage SET int_core_time_success = 3500 * 5 " + - "WHERE pk_layer=?", layer.getLayerId()); - - assertTrue(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Take the general tag away. If a layer is not a general layer - * it cannot be optmiized. - * Assert False - */ - jdbcTemplate.update( - "UPDATE layer SET str_tags=? WHERE pk_layer=?", - "desktop",layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Layers that are already tagged util should return - * false as well. - * - * Assert False - */ - jdbcTemplate.update( - "UPDATE layer SET str_tags=? WHERE pk_layer=?", - "general | util",layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateUsage() { - LayerDetail layer = getLayer(); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33, 0); - - assertTrue(usage.getClockTimeSeconds() > 0); - assertTrue(usage.getCoreTimeSeconds() > 0); - - /** - * Successful frame - */ - layerDao.updateUsage(layer, usage, 0); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - /** - * Failed frame - */ - layerDao.updateUsage(layer, usage, 1); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", - Long.class, 
layer.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void isLayerThreadable() { - LayerDetail layer = getLayer(); - jdbcTemplate.update( - "UPDATE layer set b_threadable = false WHERE pk_layer = ?", - layer.getId()); - - assertFalse(layerDao.isThreadable(layer)); - - jdbcTemplate.update( - "UPDATE layer set b_threadable = true WHERE pk_layer = ?", - layer.getId()); - - assertTrue(layerDao.isThreadable(layer)); - } - - @Test - @Transactional - @Rollback(true) - public void enableMemoryOptimizer() { - LayerDetail layer = getLayer(); - layerDao.enableMemoryOptimizer(layer, false); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_optimize FROM layer WHERE pk_layer=?", - Boolean.class, layer.getLayerId())); - - layerDao.enableMemoryOptimizer(layer, true); - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_optimize FROM layer WHERE pk_layer=?", - Boolean.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void testBalanceMemory() { - LayerDetail layer = getLayer(); - assertTrue(layerDao.balanceLayerMinMemory(layer, CueUtil.GB)); - jdbcTemplate.update("UPDATE layer_mem SET int_max_rss=? WHERE pk_layer=?", - CueUtil.GB8, layer.getId()); - assertFalse(layerDao.balanceLayerMinMemory(layer, CueUtil.MB512)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOutput() { - LayerDetail layer = getLayer(); - layerDao.insertLayerOutput(layer, "filespec1"); - layerDao.insertLayerOutput(layer, "filespec2"); - layerDao.insertLayerOutput(layer, "filespec3"); - assertEquals(3, layerDao.getLayerOutputs(layer).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimits() { - LayerDetail layer = getLayer(); - List limits = layerDao.getLimits(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimitNames() { - LayerDetail layer = getLayer(); - List limits = layerDao.getLimitNames(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0), LIMIT_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void testAddLimit() { - LayerDetail layer = getLayer(); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_B)); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_C)); - LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); - List limits = layerDao.getLimits(layerResult); - assertEquals(limits.size(), 4); - List sourceIds = Arrays.asList( - getTestLimitId(LIMIT_NAME), - getTestLimitId(LIMIT_TEST_A), - getTestLimitId(LIMIT_TEST_B), - getTestLimitId(LIMIT_TEST_C) - ); - List resultIds = Arrays.asList( - limits.get(0).id, - limits.get(1).id, - limits.get(2).id, - limits.get(3).id - ); - Collections.sort(sourceIds); - Collections.sort(resultIds); - assertEquals(sourceIds, resultIds); - } - - @Test - @Transactional - @Rollback(true) - public void testDropLimit() { - LayerDetail layer = getLayer(); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); - layerDao.dropLimit(layer, getTestLimitId(LIMIT_NAME)); - LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); - List limits = layerDao.getLimits(layerResult); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_TEST_A)); - 
layerDao.dropLimit(layer, getTestLimitId(LIMIT_TEST_A)); - LayerInterface layerResultB = layerDao.getLayer(layer.getLayerId()); - List limitsB = layerDao.getLimits(layerResultB); - assertEquals(limitsB.size(), 0); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class LayerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + JobDao jobDao; + + @Resource + LayerDao layerDao; + + @Resource + LimitDao limitDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + DepartmentDao departmentDao; + + @Resource + FacilityDao facilityDao; + + @Resource + JobLogUtil jobLogUtil; + + private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; + private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; + private static String LAYER_NAME = "pass_1"; + private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; + private static String LIMIT_NAME = "test-limit"; + private static String LIMIT_TEST_A = "testlimita"; + private static String LIMIT_TEST_B = "testlimitb"; + private static String LIMIT_TEST_C = "testlimitc"; + private static int LIMIT_MAX_VALUE = 32; + + @Before + public void testMode() { + jobLauncher.testMode = true; + } + + public LayerDetail getLayer() { + List layers = getLayers(); + return layers.get(layers.size() - 1); + } + + public List getLayers() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = spec.getJobs().get(0).detail; + job.groupId = ROOT_FOLDER; + job.showId = ROOT_SHOW; + job.logDir = jobLogUtil.getJobLogPath(job); + job.deptId = departmentDao.getDefaultDepartment().getId(); + job.facilityId = facilityDao.getDefaultFacility().getId(); + jobDao.insertJob(job, jobLogUtil); + + List result = new ArrayList<>(); + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + limitDao.createLimit(LIMIT_TEST_A, 1); + limitDao.createLimit(LIMIT_TEST_B, 2); + limitDao.createLimit(LIMIT_TEST_C, 3); + + for (BuildableLayer buildableLayer : spec.getJobs().get(0).getBuildableLayers()) { + LayerDetail layer = buildableLayer.layerDetail; + FrameSet frameSet = new FrameSet(layer.range); + int num_frames = frameSet.size(); + int chunk_size = layer.chunkSize; + + layer.jobId = job.id; + layer.showId = ROOT_SHOW; + layer.totalFrameCount = num_frames / chunk_size; + if (num_frames % chunk_size > 0) { + layer.totalFrameCount++; + } + + layerDao.insertLayerDetail(layer); + layerDao.insertLayerEnvironment(layer, buildableLayer.env); + layerDao.addLimit(layer, limitId); + result.add(layer); + } + + return result; + } + + public JobDetail getJob() { + return jobDao.findJobDetail(JOB_NAME); + } + + public String getTestLimitId(String name) { + return limitDao.findLimit(name).getLimitId(); + } + + @Test + @Transactional + @Rollback(true) + public void testIsLayerComplete() { + layerDao.isLayerComplete(getLayer()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsLayerDispatchable() { + layerDao.isLayerDispatchable(getLayer()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerDetail() { + LayerDetail layer = getLayer(); + assertEquals(LAYER_NAME, layer.name); + assertEquals(layer.chunkSize, 1); + assertEquals(layer.dispatchOrder, 2); + assertNotNull(layer.id); + assertNotNull(layer.jobId); + assertEquals(layer.showId, ROOT_SHOW); + } + + 
@Test + @Transactional + @Rollback(true) + public void testGetLayerDetail() { + LayerDetail layer = getLayer(); + assertEquals(LAYER_NAME, layer.name); + assertEquals(layer.chunkSize, 1); + assertEquals(layer.dispatchOrder, 2); + assertNotNull(layer.id); + assertNotNull(layer.jobId); + assertEquals(layer.showId, ROOT_SHOW); + + LayerDetail l2 = layerDao.getLayerDetail(layer); + LayerDetail l3 = layerDao.getLayerDetail(layer.id); + assertEquals(layer, l2); + assertEquals(layer, l3); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerDetails() { + List wantLayers = getLayers(); + List gotLayers = layerDao.getLayerDetails(getJob()); + assertThat(gotLayers, containsInAnyOrder(wantLayers.toArray())); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLayerDetail() { + LayerDetail layer = getLayer(); + layerDao.findLayer(getJob(), "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayer() { + LayerDetail layer = getLayer(); + layerDao.getLayer(layer.id); + layerDao.getLayerDetail(layer); + layerDao.getLayerDetail(layer.id); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLayer() { + LayerDetail layer = getLayer(); + layerDao.findLayer(getJob(), "pass_1"); + layerDao.findLayerDetail(getJob(), "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerMinCores() { + LayerDetail layer = getLayer(); + layerDao.updateLayerMinCores(layer, 200); + LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(l2.minimumCores, 200); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerThreadable() { + LayerDetail layer = getLayer(); + layerDao.updateThreadable(layer, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_threadable FROM layer WHERE pk_layer=?", + Boolean.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerMinMemory() { + LayerDetail layer = getLayer(); + + /* + * Check to ensure going below Dispatcher.MEM_RESERVED_MIN is not allowed. + */ + layerDao.updateLayerMinMemory(layer, 8096); + LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + // Hardcoded value of dispatcher.memory.mem_reserved_min + // to avoid having to read opencue.properties on a test setting + assertEquals(l2.minimumMemory, 262144); + + /* + * Check regular operation. 
+ */ + layerDao.updateLayerMinMemory(layer, CueUtil.GB); + LayerDetail l3 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(l3.minimumMemory, CueUtil.GB); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerTags() { + LayerDetail layer = getLayer(); + + HashSet tags = new HashSet(); + tags.add("frickjack"); + tags.add("pancake"); + + layerDao.updateLayerTags(layer, tags); + LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(StringUtils.join(l2.tags, " | "), "frickjack | pancake"); + + tags.clear(); + tags.add("frickjack"); + + layerDao.updateLayerTags(layer, tags); + l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(StringUtils.join(l2.tags, " | "), "frickjack"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameStateTotals() { + LayerDetail layer = getLayer(); + layerDao.getFrameStateTotals(layer); + jobDao.getFrameStateTotals(layer); + } + + @Test + @Transactional + @Rollback(true) + public void testGetExecutionSummary() { + LayerDetail layer = getLayer(); + layerDao.getExecutionSummary(layer); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerEnvironment() { + LayerDetail layer = getLayer(); + Map map = layerDao.getLayerEnvironment(layer); + for (Map.Entry e : map.entrySet()) { + + } + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerEnvironment() { + LayerDetail layer = getLayer(); + layerDao.insertLayerEnvironment(layer, "CHAMBERS", "123"); + Map env = layerDao.getLayerEnvironment(layer); + assertEquals(2, env.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerEnvironmentMap() { + LayerDetail layer = getLayer(); + Map map = new HashMap(); + map.put("CHAMBERS", "123"); + map.put("OVER9000", "123"); + + layerDao.insertLayerEnvironment(layer, map); + Map env = layerDao.getLayerEnvironment(layer); + assertEquals(3, env.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastSameNameMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastTimeStampMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1_2011_05_03_16_03"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastNewVersionMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastNewVersionTimeStampMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2_2011_05_03_16_03"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + 
@Transactional + @Rollback(true) + public void testFindPastNewVersionFailMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_vfail_v2"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerMaxRSS() { + LayerDetail layer = getLayer(); + + layerDao.updateLayerMaxRSS(layer, 1000, true); + assertEquals(Long.valueOf(1000), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); + + layerDao.updateLayerMaxRSS(layer, 999, true); + assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); + + layerDao.updateLayerMaxRSS(layer, 900, false); + assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateTags() { + String tag = "dillweed"; + LayerDetail layer = getLayer(); + layerDao.updateTags(layer, tag, LayerType.RENDER); + assertEquals(tag, jdbcTemplate.queryForObject("SELECT str_tags FROM layer WHERE pk_layer=?", + String.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMinMemory() { + long mem = CueUtil.GB; + LayerDetail layer = getLayer(); + layerDao.updateMinMemory(layer, mem, LayerType.RENDER); + assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( + "SELECT int_mem_min FROM layer WHERE pk_layer=?", Long.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMinGpuMemory() { + long mem = CueUtil.GB; + LayerDetail layer = getLayer(); + layerDao.updateMinGpuMemory(layer, mem, LayerType.RENDER); + assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_min FROM layer WHERE pk_layer=?", Long.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMinCores() { + int cores = CueUtil.ONE_CORE * 2; + LayerDetail layer = getLayer(); + layerDao.updateMinCores(layer, cores, LayerType.RENDER); + assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( + "SELECT int_cores_min FROM layer WHERE pk_layer=?", Integer.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMaxCores() { + int cores = CueUtil.ONE_CORE * 2; + LayerDetail layer = getLayer(); + layerDao.updateLayerMaxCores(layer, cores); + assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( + "SELECT int_cores_max FROM layer WHERE pk_layer=?", Integer.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void isOptimizable() { + LayerDetail layer = getLayer(); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * The succeeded count is good but the frames are too long Assert False + */ + jdbcTemplate.update("UPDATE layer_stat SET int_succeeded_count = 5 WHERE pk_layer=?", + layer.getLayerId()); + + jdbcTemplate.update( + "UPDATE layer_usage SET int_core_time_success = 3600 * 6 " + "WHERE pk_layer=?", + layer.getLayerId()); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * Set the frame times lower, so now we meet the criteria Assert True + */ + jdbcTemplate.update( + "UPDATE layer_usage SET int_core_time_success = 
3500 * 5 " + "WHERE pk_layer=?", + layer.getLayerId()); + + assertTrue(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * Take the general tag away. If a layer is not a general layer it cannot be optmiized. Assert + * False + */ + jdbcTemplate.update("UPDATE layer SET str_tags=? WHERE pk_layer=?", "desktop", + layer.getLayerId()); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * Layers that are already tagged util should return false as well. + * + * Assert False + */ + jdbcTemplate.update("UPDATE layer SET str_tags=? WHERE pk_layer=?", "general | util", + layer.getLayerId()); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateUsage() { + LayerDetail layer = getLayer(); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", Integer.class, + layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", Integer.class, + layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", Integer.class, + layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + /** 60 seconds of 100 core units **/ + ResourceUsage usage = new ResourceUsage(60, 33, 0); + + assertTrue(usage.getClockTimeSeconds() > 0); + assertTrue(usage.getCoreTimeSeconds() > 0); + + /** + * Successful frame + */ + layerDao.updateUsage(layer, usage, 0); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", Long.class, + layer.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", Long.class, + layer.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", Integer.class, + layer.getId())); + + /** + * Failed frame + */ + layerDao.updateUsage(layer, usage, 1); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( + "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", Long.class, layer.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( + "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", Long.class, layer.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void isLayerThreadable() { + LayerDetail layer = getLayer(); + jdbcTemplate.update("UPDATE layer set b_threadable = false WHERE pk_layer = ?", layer.getId()); + + assertFalse(layerDao.isThreadable(layer)); + + jdbcTemplate.update("UPDATE layer set b_threadable = true WHERE pk_layer = ?", 
layer.getId()); + + assertTrue(layerDao.isThreadable(layer)); + } + + @Test + @Transactional + @Rollback(true) + public void enableMemoryOptimizer() { + LayerDetail layer = getLayer(); + layerDao.enableMemoryOptimizer(layer, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_optimize FROM layer WHERE pk_layer=?", + Boolean.class, layer.getLayerId())); + + layerDao.enableMemoryOptimizer(layer, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_optimize FROM layer WHERE pk_layer=?", + Boolean.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void testBalanceMemory() { + LayerDetail layer = getLayer(); + assertTrue(layerDao.balanceLayerMinMemory(layer, CueUtil.GB)); + jdbcTemplate.update("UPDATE layer_mem SET int_max_rss=? WHERE pk_layer=?", CueUtil.GB8, + layer.getId()); + assertFalse(layerDao.balanceLayerMinMemory(layer, CueUtil.MB512)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOutput() { + LayerDetail layer = getLayer(); + layerDao.insertLayerOutput(layer, "filespec1"); + layerDao.insertLayerOutput(layer, "filespec2"); + layerDao.insertLayerOutput(layer, "filespec3"); + assertEquals(3, layerDao.getLayerOutputs(layer).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimits() { + LayerDetail layer = getLayer(); + List limits = layerDao.getLimits(layer); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimitNames() { + LayerDetail layer = getLayer(); + List limits = layerDao.getLimitNames(layer); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0), LIMIT_NAME); + } + + @Test + @Transactional + @Rollback(true) + public void testAddLimit() { + LayerDetail layer = getLayer(); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_B)); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_C)); + LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); + List limits = layerDao.getLimits(layerResult); + assertEquals(limits.size(), 4); + List sourceIds = Arrays.asList(getTestLimitId(LIMIT_NAME), getTestLimitId(LIMIT_TEST_A), + getTestLimitId(LIMIT_TEST_B), getTestLimitId(LIMIT_TEST_C)); + List resultIds = + Arrays.asList(limits.get(0).id, limits.get(1).id, limits.get(2).id, limits.get(3).id); + Collections.sort(sourceIds); + Collections.sort(resultIds); + assertEquals(sourceIds, resultIds); + } + + @Test + @Transactional + @Rollback(true) + public void testDropLimit() { + LayerDetail layer = getLayer(); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); + layerDao.dropLimit(layer, getTestLimitId(LIMIT_NAME)); + LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); + List limits = layerDao.getLimits(layerResult); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0).id, getTestLimitId(LIMIT_TEST_A)); + layerDao.dropLimit(layer, getTestLimitId(LIMIT_TEST_A)); + LayerInterface layerResultB = layerDao.getLayer(layer.getLayerId()); + List limitsB = layerDao.getLimits(layerResultB); + assertEquals(limitsB.size(), 0); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java index c0baac7f4..64fb456f7 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java +++ 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -37,101 +33,98 @@ import static org.junit.Assert.assertEquals; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class LimitDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - LimitDao limitDao; - - private static String LIMIT_NAME = "test-limit"; - private static int LIMIT_MAX_VALUE = 32; - - @Test - @Transactional - @Rollback(true) - public void testCreateLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", - Integer.class, limitId)); - - limitDao.deleteLimit(limit); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", - Integer.class, limitId)); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - - LimitEntity limit = limitDao.findLimit(LIMIT_NAME); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - - LimitEntity limit = limitDao.getLimit(limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testSetLimitName() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - String newName = "heyIChanged"; - - limitDao.setLimitName(limit, 
newName); - - limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, newName); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testSetMaxValue() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - int newValue = 600; - - limitDao.setMaxValue(limit, newValue); - - limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, newValue); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class LimitDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + LimitDao limitDao; + + private static String LIMIT_NAME = "test-limit"; + private static int LIMIT_MAX_VALUE = 32; + + @Test + @Transactional + @Rollback(true) + public void testCreateLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + assertEquals(limit.id, limitId); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", Integer.class, limitId)); + + limitDao.deleteLimit(limit); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", Integer.class, limitId)); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + + LimitEntity limit = limitDao.findLimit(LIMIT_NAME); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + + LimitEntity limit = limitDao.getLimit(limitId); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testSetLimitName() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + String newName = "heyIChanged"; + + limitDao.setLimitName(limit, newName); + + limit = limitDao.getLimit(limitId); + assertEquals(limit.id, limitId); + assertEquals(limit.name, newName); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testSetMaxValue() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + int newValue = 600; + + limitDao.setMaxValue(limit, newValue); + + limit = limitDao.getLimit(limitId); + assertEquals(limit.id, limitId); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, newValue); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java index cd435a7e9..2b96fa039 100644 --- 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -39,38 +35,36 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class MaintenanceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - MaintenanceDao maintenanceDao; - - @Test - @Transactional - @Rollback(true) - public void testSetUpHostsToDown() { - maintenanceDao.setUpHostsToDown(); - } - - @Test - @Transactional - @Rollback(true) - public void testLockHistoricalTask() { - assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - assertFalse(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnlockHistoricalTask() { - assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class MaintenanceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + MaintenanceDao maintenanceDao; + + @Test + @Transactional + @Rollback(true) + public void testSetUpHostsToDown() { + maintenanceDao.setUpHostsToDown(); + } + + @Test + @Transactional + @Rollback(true) + public void testLockHistoricalTask() { + assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); + assertFalse(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); + } + + @Test + @Transactional + @Rollback(true) + public void testUnlockHistoricalTask() { + assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); + maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java index 7867891a4..4cfbf8287 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -44,97 +40,95 @@ import com.imageworks.spcue.test.AssumingPostgresEngine; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class MatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - MatcherDao matcherDao; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - GroupDao groupDao; - - private static String FILTER_NAME = "test_filter"; - - public ShowEntity getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public MatcherEntity createMatcher() { - FilterEntity filter = createFilter(); - MatcherEntity matcher = new MatcherEntity(); - matcher.filterId = filter.id; - matcher.name = null; - matcher.showId = getShow().getId(); - matcher.subject = MatchSubject.JOB_NAME; - matcher.type = MatchType.CONTAINS; - matcher.value = "testuser"; - matcherDao.insertMatcher(matcher); - return matcher; - } - - public FilterEntity createFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - filterDao.insertFilter(filter); - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testInsertMatcher() { - createMatcher(); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteMatcher() { - MatcherEntity matcher = createMatcher(); - matcherDao.deleteMatcher(matcher); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMatcher() { - MatcherEntity matcher = createMatcher(); - matcher.subject = MatchSubject.USER; - matcher.value = "testuser"; - matcher.type = MatchType.IS; - matcherDao.updateMatcher(matcher); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatcher() { - MatcherEntity matcher = createMatcher(); 
- matcherDao.getMatcher(matcher); - matcherDao.getMatcher(matcher.getMatcherId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatchers() { - MatcherEntity matcher = createMatcher(); - matcherDao.getMatchers(matcher); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class MatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + MatcherDao matcherDao; + + @Resource + FilterDao filterDao; + + @Resource + ShowDao showDao; + + @Resource + GroupDao groupDao; + + private static String FILTER_NAME = "test_filter"; + + public ShowEntity getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public MatcherEntity createMatcher() { + FilterEntity filter = createFilter(); + MatcherEntity matcher = new MatcherEntity(); + matcher.filterId = filter.id; + matcher.name = null; + matcher.showId = getShow().getId(); + matcher.subject = MatchSubject.JOB_NAME; + matcher.type = MatchType.CONTAINS; + matcher.value = "testuser"; + matcherDao.insertMatcher(matcher); + return matcher; + } + + public FilterEntity createFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = "00000000-0000-0000-0000-000000000000"; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + filterDao.insertFilter(filter); + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testInsertMatcher() { + createMatcher(); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteMatcher() { + MatcherEntity matcher = createMatcher(); + matcherDao.deleteMatcher(matcher); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMatcher() { + MatcherEntity matcher = createMatcher(); + matcher.subject = MatchSubject.USER; + matcher.value = "testuser"; + matcher.type = MatchType.IS; + matcherDao.updateMatcher(matcher); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatcher() { + MatcherEntity matcher = createMatcher(); + matcherDao.getMatcher(matcher); + matcherDao.getMatcher(matcher.getMatcherId()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatchers() { + MatcherEntity matcher = createMatcher(); + matcherDao.getMatchers(matcher); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java index ac98d6ede..1e3ce4246 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -37,32 +33,31 @@ import com.imageworks.spcue.test.AssumingPostgresEngine; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class NestedWhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - NestedWhiteboardDao nestedWhiteboardDao; - - @Resource - ShowDao showDao; - - public ShowEntity getShow() { - return showDao.findShowDetail("pipe"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetNestedJobWhiteboard() { - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + NestedWhiteboardDao nestedWhiteboardDao; + + @Resource + ShowDao showDao; + + public ShowEntity getShow() { + return showDao.findShowDetail("pipe"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetNestedJobWhiteboard() { + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java index 997bf2956..2b549d7eb 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -41,91 +37,87 @@ import static org.junit.Assert.assertEquals; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class OwnerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - OwnerDao ownerDao; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Test - @Transactional - @Rollback(true) - public void testInsertOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - } - - @Test - @Transactional - @Rollback(true) - public void testIsOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - } - - @Test - @Transactional - @Rollback(true) - public void testGetOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - assertEquals(o, ownerDao.findOwner("spongebob")); - assertEquals(o, ownerDao.getOwner(o.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM owner WHERE pk_owner=?", - Integer.class, o.id)); - - ownerDao.deleteOwner(o); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM owner WHERE pk_owner=?", - Integer.class, o.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShow() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - ShowInterface newShow = adminManager.findShowEntity("edu"); - - ownerDao.updateShow(o, newShow); - - assertEquals(newShow.getShowId(), jdbcTemplate.queryForObject( - "SELECT pk_show FROM owner WHERE pk_owner=?", - String.class, o.id)); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class OwnerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + OwnerDao ownerDao; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Test + 
@Transactional + @Rollback(true) + public void testInsertOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + } + + @Test + @Transactional + @Rollback(true) + public void testIsOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + } + + @Test + @Transactional + @Rollback(true) + public void testGetOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + + assertEquals(o, ownerDao.findOwner("spongebob")); + assertEquals(o, ownerDao.getOwner(o.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM owner WHERE pk_owner=?", Integer.class, o.id)); + + ownerDao.deleteOwner(o); + + assertEquals(Integer.valueOf(0), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM owner WHERE pk_owner=?", Integer.class, o.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShow() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + + ShowInterface newShow = adminManager.findShowEntity("edu"); + + ownerDao.updateShow(o, newShow); + + assertEquals(newShow.getShowId(), jdbcTemplate + .queryForObject("SELECT pk_show FROM owner WHERE pk_owner=?", String.class, o.id)); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java index d815f64b8..378fc2d34 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -48,124 +44,117 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class PointDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DepartmentDao departmentDao; - - @Resource - AdminManager adminManager; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class PointDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - PointDao pointDao; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } + @Resource + DepartmentDao departmentDao; - @Test - @Transactional - @Rollback(true) - public void insertDepartmentConfig() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - DepartmentInterface dept = departmentDao.findDepartment("Lighting"); - PointInterface d = pointDao.insertPointConf(show, dept); + @Resource + AdminManager adminManager; - assertEquals(show.id, jdbcTemplate.queryForObject( - "SELECT pk_show FROM point WHERE pk_point=?", - String.class, d.getPointId())); + @Resource + JobManager jobManager; - assertEquals(dept.getDepartmentId(), jdbcTemplate.queryForObject( - "SELECT pk_dept FROM point WHERE pk_point=?", - String.class, d.getPointId())); - } + @Resource + JobLauncher jobLauncher; - @Test - @Transactional - @Rollback(true) - public void departmentConfigExists() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); + @Resource + PointDao pointDao; - assertTrue(pointDao.pointConfExists(show, - departmentDao.getDefaultDepartment())); + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } - assertFalse(pointDao.pointConfExists(show, - departmentDao.findDepartment("Lighting"))); - } + @Test + @Transactional + @Rollback(true) + public void insertDepartmentConfig() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + DepartmentInterface dept = departmentDao.findDepartment("Lighting"); + PointInterface d = pointDao.insertPointConf(show, dept); - @Test - @Transactional - @Rollback(true) - public void updateEnableTiManaged() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); + assertEquals(show.id, jdbcTemplate.queryForObject("SELECT pk_show FROM point WHERE pk_point=?", + String.class, d.getPointId())); - PointInterface config = pointDao.getPointConfigDetail(show, - departmentDao.getDefaultDepartment()); + assertEquals(dept.getDepartmentId(), jdbcTemplate.queryForObject( + "SELECT pk_dept FROM point WHERE pk_point=?", String.class, d.getPointId())); + } - //pointDao.updateEnableManaged(config, "Lighting", 10); - } + @Test + @Transactional + @Rollback(true) + public void departmentConfigExists() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + 
adminManager.createShow(show); - @Test - @Transactional - @Rollback(true) - public void getDepartmentConfig() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); + assertTrue(pointDao.pointConfExists(show, departmentDao.getDefaultDepartment())); - /* Tests both overlodaed methods */ - PointInterface configA = pointDao.getPointConfigDetail(show, - departmentDao.getDefaultDepartment()); + assertFalse(pointDao.pointConfExists(show, departmentDao.findDepartment("Lighting"))); + } - PointInterface configB = pointDao.getPointConfDetail( - configA.getPointId()); + @Test + @Transactional + @Rollback(true) + public void updateEnableTiManaged() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); - assertEquals(configA.getPointId(), configB.getPointId()); - assertEquals(configA.getDepartmentId(), configB.getDepartmentId()); - assertEquals(configA.getShowId(), configB.getShowId()); - } + PointInterface config = + pointDao.getPointConfigDetail(show, departmentDao.getDefaultDepartment()); + // pointDao.updateEnableManaged(config, "Lighting", 10); + } - @Test - @Transactional - @Rollback(true) - public void testIsOverMinCores() { + @Test + @Transactional + @Rollback(true) + public void getDepartmentConfig() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); - JobDetail job = launchJob(); - - PointInterface pointConfig = pointDao.getPointConfigDetail(job, - departmentDao.getDepartment(job.getDepartmentId())); + /* Tests both overlodaed methods */ + PointInterface configA = + pointDao.getPointConfigDetail(show, departmentDao.getDefaultDepartment()); - assertFalse(pointDao.isOverMinCores(job)); + PointInterface configB = pointDao.getPointConfDetail(configA.getPointId()); - // Now update some values so it returns true. - jdbcTemplate.update("UPDATE point SET int_cores = int_min_cores + 2000000 WHERE pk_point=?", - pointConfig.getId()); + assertEquals(configA.getPointId(), configB.getPointId()); + assertEquals(configA.getDepartmentId(), configB.getDepartmentId()); + assertEquals(configA.getShowId(), configB.getShowId()); + } - logger.info(jdbcTemplate.queryForObject("SELECT int_min_cores from point where pk_point=?", - Integer.class, pointConfig.getId())); - - assertTrue(pointDao.isOverMinCores(job)); - } + @Test + @Transactional + @Rollback(true) + public void testIsOverMinCores() { -} + JobDetail job = launchJob(); + + PointInterface pointConfig = + pointDao.getPointConfigDetail(job, departmentDao.getDepartment(job.getDepartmentId())); + + assertFalse(pointDao.isOverMinCores(job)); + // Now update some values so it returns true. 
+ jdbcTemplate.update("UPDATE point SET int_cores = int_min_cores + 2000000 WHERE pk_point=?", + pointConfig.getId()); + + logger.info(jdbcTemplate.queryForObject("SELECT int_min_cores from point where pk_point=?", + Integer.class, pointConfig.getId())); + + assertTrue(pointDao.isOverMinCores(job)); + } + +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java index 6620116c8..402ed745b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -69,784 +65,745 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ProcDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Autowired - private Environment env; - - @Resource - ProcDao procDao; - - @Resource - HostDao hostDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - Dispatcher dispatcher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - ProcSearchFactory procSearchFactory; - - private static String PK_ALLOC = "00000000-0000-0000-0000-000000000000"; - - private long MEM_RESERVED_DEFAULT; - private long MEM_GPU_RESERVED_DEFAULT; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("beta") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB32) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(8) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return hostDao.findDispatchHost("beta"); - } - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Before - public void setDispatcherTestMode() { - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - this.MEM_RESERVED_DEFAULT = env.getRequiredProperty( - "dispatcher.memory.mem_reserved_default", - Long.class); - this.MEM_GPU_RESERVED_DEFAULT = env.getRequiredProperty( - "dispatcher.memory.mem_gpu_reserved_default", - Long.class); - } - - @Test - @Transactional - @Rollback(true) - public void testDontVerifyRunningProc() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); - VirtualProc proc = VirtualProc.build(host, frame); - dispatcher.dispatch(frame, proc); - - // Confirm was have a running frame. - assertEquals("RUNNING", jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, frame.id)); - - assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); - jobManager.shutdownJob(job); - - int result = jdbcTemplate.update( - "UPDATE job SET ts_stopped = " + - "current_timestamp - interval '10' minute " + - "WHERE pk_job=?", job.id); - - assertEquals(1, result); - assertFalse(procDao.verifyRunningProc(proc.getId(), frame.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertVirtualProc() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteVirtualProc() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - procDao.deleteVirtualProc(proc); - } - - @Test - @Transactional - @Rollback(true) - public void testClearVirtualProcAssignment() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - 
proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - procDao.clearVirtualProcAssignment(proc); - } - - @Test - @Transactional - @Rollback(true) - public void testClearVirtualProcAssignmentByFrame() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - assertTrue(procDao.clearVirtualProcAssignment(frame)); - } - - - @Test - @Transactional - @Rollback(true) - public void testUpdateVirtualProcAssignment() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame1 = frameDao.findFrameDetail(job, "0001-pass_1"); - FrameDetail frame2 = frameDao.findFrameDetail(job, "0002-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame1.id; - proc.layerId = frame1.layerId; - proc.showId = frame1.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame1.getId()); - - proc.frameId = frame2.id; - - procDao.updateVirtualProcAssignment(proc); - procDao.verifyRunningProc(proc.getId(), frame2.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateProcMemoryUsage() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - byte[] children = new byte[100]; - - procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000, 0, 0, 0, children); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetVirtualProc() { - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); - VirtualProc proc = VirtualProc.build(host, frame); - dispatcher.dispatch(frame, proc); - - assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM proc WHERE pk_proc=?", - Integer.class, proc.id)); - - - VirtualProc verifyProc = procDao.getVirtualProc(proc.getId()); - assertEquals(host.allocationId, verifyProc.allocationId); - assertEquals(proc.coresReserved, verifyProc.coresReserved); - assertEquals(proc.frameId, verifyProc.frameId); - assertEquals(proc.hostId, verifyProc.hostId); - assertEquals(proc.id, verifyProc.id); - assertEquals(proc.jobId, verifyProc.jobId); - 
assertEquals(proc.layerId, verifyProc.layerId); - assertEquals(proc.showId, verifyProc.showId); - } - - @Test - @Transactional - @Rollback(true) - public void testFindVirtualProc() { - - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.findVirtualProc(frame); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ProcDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Test - @Transactional - @Rollback(true) - public void testFindVirtualProcs() { - - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - assertEquals(1, procDao.findVirtualProcs(HardwareState.UP).size()); - assertEquals(1, procDao.findVirtualProcs(host).size()); - assertEquals(1, procDao.findVirtualProcs(job).size()); - assertEquals(1, procDao.findVirtualProcs(frame).size()); - assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create(job)).size()); - assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create((LayerInterface) frame)).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindOrphanedVirtualProcs() { - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - assertEquals(0, procDao.findOrphanedVirtualProcs().size()); - - /** - * This is destructive to running jobs - */ - jdbcTemplate.update( - "UPDATE proc SET ts_ping = (current_timestamp - interval '30' day)"); - - assertEquals(1, procDao.findOrphanedVirtualProcs().size()); - assertTrue(procDao.isOrphan(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnbookProc() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = 
frame.showId; - procDao.insertVirtualProc(proc); - - procDao.unbookProc(proc); - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_unbooked FROM proc WHERE pk_proc=?", - Boolean.class, proc.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnbookVirtualProcs() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + private Environment env; - List procs = new ArrayList(); - procs.add(proc); + @Resource + ProcDao procDao; - procDao.unbookVirtualProcs(procs); - - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_unbooked FROM proc WHERE pk_proc=?", - Boolean.class, proc.id)); - } + @Resource + HostDao hostDao; + @Resource + JobManager jobManager; - @Test(expected=ResourceReservationFailureException.class) - @Transactional - @Rollback(true) - public void testIncreaseReservedMemoryFail() { + @Resource + JobLauncher jobLauncher; - DispatchHost host = createHost(); - JobDetail job = launchJob(); + @Resource + FrameDao frameDao; - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + @Resource + LayerDao layerDao; + + @Resource + DispatcherDao dispatcherDao; - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); + @Resource + HostManager hostManager; - procDao.increaseReservedMemory(proc, 8173264l * 8); + @Resource + AdminManager adminManager; + + @Resource + Dispatcher dispatcher; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + ProcSearchFactory procSearchFactory; + + private static String PK_ALLOC = "00000000-0000-0000-0000-000000000000"; + + private long MEM_RESERVED_DEFAULT; + private long MEM_GPU_RESERVED_DEFAULT; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("beta").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB32).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(8).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return hostDao.findDispatchHost("beta"); + } + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Before + public void setDispatcherTestMode() { + dispatcher.setTestMode(true); + jobLauncher.testMode = true; + this.MEM_RESERVED_DEFAULT = + env.getRequiredProperty("dispatcher.memory.mem_reserved_default", Long.class); + this.MEM_GPU_RESERVED_DEFAULT = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_default", Long.class); + } + + @Test + @Transactional + @Rollback(true) + public void testDontVerifyRunningProc() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); + VirtualProc proc = VirtualProc.build(host, frame); + dispatcher.dispatch(frame, proc); + + // Confirm was have a running frame. + assertEquals("RUNNING", jdbcTemplate + .queryForObject("SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.id)); + + assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); + jobManager.shutdownJob(job); + + int result = jdbcTemplate.update("UPDATE job SET ts_stopped = " + + "current_timestamp - interval '10' minute " + "WHERE pk_job=?", job.id); + + assertEquals(1, result); + assertFalse(procDao.verifyRunningProc(proc.getId(), frame.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertVirtualProc() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteVirtualProc() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + procDao.deleteVirtualProc(proc); + } + + @Test + @Transactional + @Rollback(true) + public void testClearVirtualProcAssignment() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; 
+ proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + procDao.clearVirtualProcAssignment(proc); + } + + @Test + @Transactional + @Rollback(true) + public void testClearVirtualProcAssignmentByFrame() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + assertTrue(procDao.clearVirtualProcAssignment(frame)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateVirtualProcAssignment() { + + DispatchHost host = createHost(); + + JobDetail job = launchJob(); + FrameDetail frame1 = frameDao.findFrameDetail(job, "0001-pass_1"); + FrameDetail frame2 = frameDao.findFrameDetail(job, "0002-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame1.id; + proc.layerId = frame1.layerId; + proc.showId = frame1.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame1.getId()); + + proc.frameId = frame2.id; + + procDao.updateVirtualProcAssignment(proc); + procDao.verifyRunningProc(proc.getId(), frame2.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateProcMemoryUsage() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + byte[] children = new byte[100]; + + procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000, 0, 0, 0, children); + + } + + @Test + @Transactional + @Rollback(true) + public void testGetVirtualProc() { + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); + VirtualProc proc = VirtualProc.build(host, frame); + dispatcher.dispatch(frame, proc); + + assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM proc WHERE pk_proc=?", Integer.class, proc.id)); + + VirtualProc verifyProc = procDao.getVirtualProc(proc.getId()); + assertEquals(host.allocationId, verifyProc.allocationId); + assertEquals(proc.coresReserved, verifyProc.coresReserved); + assertEquals(proc.frameId, verifyProc.frameId); + assertEquals(proc.hostId, verifyProc.hostId); + assertEquals(proc.id, verifyProc.id); + assertEquals(proc.jobId, verifyProc.jobId); + assertEquals(proc.layerId, verifyProc.layerId); + 
assertEquals(proc.showId, verifyProc.showId); + } + + @Test + @Transactional + @Rollback(true) + public void testFindVirtualProc() { + + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.findVirtualProc(frame); + } + + @Test + @Transactional + @Rollback(true) + public void testFindVirtualProcs() { + + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + assertEquals(1, procDao.findVirtualProcs(HardwareState.UP).size()); + assertEquals(1, procDao.findVirtualProcs(host).size()); + assertEquals(1, procDao.findVirtualProcs(job).size()); + assertEquals(1, procDao.findVirtualProcs(frame).size()); + assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create(job)).size()); + assertEquals(1, + procDao.findVirtualProcs(frameSearchFactory.create((LayerInterface) frame)).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindOrphanedVirtualProcs() { + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + assertEquals(0, procDao.findOrphanedVirtualProcs().size()); + + /** + * This is destructive to running jobs + */ + jdbcTemplate.update("UPDATE proc SET ts_ping = (current_timestamp - interval '30' day)"); + + assertEquals(1, procDao.findOrphanedVirtualProcs().size()); + assertTrue(procDao.isOrphan(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testUnbookProc() { + + DispatchHost host = createHost(); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.unbookProc(proc); + assertTrue(jdbcTemplate.queryForObject("SELECT b_unbooked FROM proc WHERE pk_proc=?", + Boolean.class, proc.id)); + } + + @Test + @Transactional + 
@Rollback(true) + public void testUnbookVirtualProcs() { + + DispatchHost host = createHost(); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + List procs = new ArrayList(); + procs.add(proc); + + procDao.unbookVirtualProcs(procs); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_unbooked FROM proc WHERE pk_proc=?", + Boolean.class, proc.id)); + } + + @Test(expected = ResourceReservationFailureException.class) + @Transactional + @Rollback(true) + public void testIncreaseReservedMemoryFail() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.increaseReservedMemory(proc, 8173264l * 8); + } + + @Test + @Transactional + @Rollback(true) + public void testIncreaseReservedMemory() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.increaseReservedMemory(proc, 3145728); + } + + @Test + @Transactional + @Rollback(true) + public void testGetReservedMemory() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + VirtualProc _proc = procDao.findVirtualProc(frame); + assertEquals(Long.valueOf(this.MEM_RESERVED_DEFAULT), jdbcTemplate + .queryForObject("SELECT int_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); + assertEquals(this.MEM_RESERVED_DEFAULT, procDao.getReservedMemory(_proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetReservedGpuMemory() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + VirtualProc _proc = procDao.findVirtualProc(frame); + assertEquals(Long.valueOf(this.MEM_GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); + assertEquals(this.MEM_GPU_RESERVED_DEFAULT, procDao.getReservedGpuMemory(_proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testBalanceUnderUtilizedProcs() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); 
+ + FrameDetail frameDetail1 = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame1 = frameDao.getDispatchFrame(frameDetail1.id); + + VirtualProc proc1 = VirtualProc.build(host, frame1); + proc1.frameId = frame1.id; + procDao.insertVirtualProc(proc1); + + byte[] children = new byte[100]; + procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000, 0, 0, 0, children); + layerDao.updateLayerMaxRSS(frame1, 250000, true); + + FrameDetail frameDetail2 = frameDao.findFrameDetail(job, "0002-pass_1"); + DispatchFrame frame2 = frameDao.getDispatchFrame(frameDetail2.id); + + VirtualProc proc2 = VirtualProc.build(host, frame2); + proc2.frameId = frame2.id; + procDao.insertVirtualProc(proc2); + + procDao.updateProcMemoryUsage(frame2, 255000, 255000, 255000, 255000, 0, 0, 0, children); + layerDao.updateLayerMaxRSS(frame2, 255000, true); + + FrameDetail frameDetail3 = frameDao.findFrameDetail(job, "0003-pass_1"); + DispatchFrame frame3 = frameDao.getDispatchFrame(frameDetail3.id); + + VirtualProc proc3 = VirtualProc.build(host, frame3); + proc3.frameId = frame3.id; + procDao.insertVirtualProc(proc3); + + procDao.updateProcMemoryUsage(frame3, 3145728, 3145728, 3145728, 3145728, 0, 0, 0, children); + layerDao.updateLayerMaxRSS(frame3, 300000, true); + + procDao.balanceUnderUtilizedProcs(proc3, 100000); + procDao.increaseReservedMemory(proc3, this.MEM_RESERVED_DEFAULT + 100000); + + // Check the target proc + VirtualProc targetProc = procDao.getVirtualProc(proc3.getId()); + assertEquals(this.MEM_RESERVED_DEFAULT + 100000, targetProc.memoryReserved); + + // Check other procs + VirtualProc firstProc = procDao.getVirtualProc(proc1.getId()); + assertEquals(this.MEM_RESERVED_DEFAULT - 50000 - 1, firstProc.memoryReserved); + + VirtualProc secondProc = procDao.getVirtualProc(proc2.getId()); + assertEquals(this.MEM_RESERVED_DEFAULT - 50000 - 1, secondProc.memoryReserved); + + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentShowId() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(job.getShowId(), procDao.getCurrentShowId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentJobId() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(job.getJobId(), procDao.getCurrentJobId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentLayerId() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(frame.getLayerId(), procDao.getCurrentLayerId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentFrameId() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + 
FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(frame.getFrameId(), procDao.getCurrentFrameId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void getProcsBySearch() { + DispatchHost host = createHost(); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + /* + * Book 5 procs. + */ + for (int i = 1; i < 6; i++) { + FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1", i)); + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = f.id; + proc.layerId = f.layerId; + proc.showId = f.showId; + proc.childProcesses = "".getBytes(); + procDao.insertVirtualProc(proc); } - @Test - @Transactional - @Rollback(true) - public void testIncreaseReservedMemory() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.increaseReservedMemory(proc, 3145728); - } - - @Test - @Transactional - @Rollback(true) - public void testGetReservedMemory() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(this.MEM_RESERVED_DEFAULT), jdbcTemplate.queryForObject( - "SELECT int_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, _proc.id)); - assertEquals(this.MEM_RESERVED_DEFAULT, - procDao.getReservedMemory(_proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetReservedGpuMemory() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(this.MEM_GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, _proc.id)); - assertEquals(this.MEM_GPU_RESERVED_DEFAULT, - procDao.getReservedGpuMemory(_proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testBalanceUnderUtilizedProcs() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail1 = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame1 = frameDao.getDispatchFrame(frameDetail1.id); - - VirtualProc proc1 = VirtualProc.build(host, frame1); - proc1.frameId = frame1.id; - 
procDao.insertVirtualProc(proc1); - - byte[] children = new byte[100]; - procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000, 0, 0, 0, children); - layerDao.updateLayerMaxRSS(frame1, 250000, true); - - FrameDetail frameDetail2 = frameDao.findFrameDetail(job, "0002-pass_1"); - DispatchFrame frame2 = frameDao.getDispatchFrame(frameDetail2.id); - - VirtualProc proc2 = VirtualProc.build(host, frame2); - proc2.frameId = frame2.id; - procDao.insertVirtualProc(proc2); - - procDao.updateProcMemoryUsage(frame2, 255000, 255000,255000, 255000, 0, 0, 0, children); - layerDao.updateLayerMaxRSS(frame2, 255000, true); - - FrameDetail frameDetail3 = frameDao.findFrameDetail(job, "0003-pass_1"); - DispatchFrame frame3 = frameDao.getDispatchFrame(frameDetail3.id); - - VirtualProc proc3 = VirtualProc.build(host, frame3); - proc3.frameId = frame3.id; - procDao.insertVirtualProc(proc3); - - procDao.updateProcMemoryUsage(frame3, 3145728, 3145728,3145728, 3145728, 0, 0, 0, children); - layerDao.updateLayerMaxRSS(frame3,300000, true); - - procDao.balanceUnderUtilizedProcs(proc3, 100000); - procDao.increaseReservedMemory(proc3, this.MEM_RESERVED_DEFAULT + 100000); - - // Check the target proc - VirtualProc targetProc = procDao.getVirtualProc(proc3.getId()); - assertEquals( this.MEM_RESERVED_DEFAULT + 100000, targetProc.memoryReserved); - - // Check other procs - VirtualProc firstProc = procDao.getVirtualProc(proc1.getId()); - assertEquals( this.MEM_RESERVED_DEFAULT - 50000 -1 , firstProc.memoryReserved); - - VirtualProc secondProc = procDao.getVirtualProc(proc2.getId()); - assertEquals(this.MEM_RESERVED_DEFAULT - 50000 -1, secondProc.memoryReserved); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentShowId() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(job.getShowId(), procDao.getCurrentShowId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentJobId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(job.getJobId(), procDao.getCurrentJobId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentLayerId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(frame.getLayerId(), procDao.getCurrentLayerId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentFrameId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - 
procDao.insertVirtualProc(proc); - - assertEquals(frame.getFrameId(), procDao.getCurrentFrameId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void getProcsBySearch() { - DispatchHost host = createHost(); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - /* - * Book 5 procs. - */ - for (int i=1; i<6; i++) { - FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1",i)); - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = f.id; - proc.layerId = f.layerId; - proc.showId = f.showId; - proc.childProcesses = "".getBytes(); - procDao.insertVirtualProc(proc); - } - - ProcSearchInterface r; - - /* - * Search for all 5 running procs - */ - r = procSearchFactory.create(); - r.addSort(new Sort("proc.ts_booked",Direction.ASC)); - ProcSearchCriteria criteriaA = r.getCriteria(); - r.setCriteria(criteriaA.toBuilder().addShows("pipe").build()); - assertEquals(5, procDao.findVirtualProcs(r).size()); - - /* - * Limit the result to 1 result. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaB = r.getCriteria(); - r.setCriteria(criteriaB.toBuilder().addShows("pipe").addMaxResults(1).build()); - assertEquals(1, procDao.findVirtualProcs(r).size()); - - /* - * Change the first result to 1, which should limt - * the result to 4. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaC = r.getCriteria(); - r.setCriteria(criteriaC.toBuilder().addShows("pipe").setFirstResult(2).build()); - r.addSort(new Sort("proc.ts_booked",Direction.ASC)); - assertEquals(4, procDao.findVirtualProcs(r).size()); - - /* - * Now try to do the eqivalent of a limit/offset - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaD = r.getCriteria(); - r.setCriteria(criteriaD.toBuilder() - .addShows("pipe") - .setFirstResult(3) - .addMaxResults(2) - .build()); - assertEquals(2, procDao.findVirtualProcs(r).size()); - - } - - @Test - @Transactional - @Rollback(true) - public void testVirtualProcWithSelfishService() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - frame.minCores = 250; - frame.threadable = true; - - // Frame from a non-selfish sevice - VirtualProc proc = VirtualProc.build(host, frame, "something-else"); - assertEquals(250, proc.coresReserved); - - // When no selfish service config is provided - proc = VirtualProc.build(host, frame); - assertEquals(250, proc.coresReserved); - - - // Frame with a selfish service - proc = VirtualProc.build(host, frame, "shell", "something-else"); - assertEquals(800, proc.coresReserved); - } + ProcSearchInterface r; + + /* + * Search for all 5 running procs + */ + r = procSearchFactory.create(); + r.addSort(new Sort("proc.ts_booked", Direction.ASC)); + ProcSearchCriteria criteriaA = r.getCriteria(); + r.setCriteria(criteriaA.toBuilder().addShows("pipe").build()); + assertEquals(5, procDao.findVirtualProcs(r).size()); + + /* + * Limit the result to 1 result. 
+ */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaB = r.getCriteria(); + r.setCriteria(criteriaB.toBuilder().addShows("pipe").addMaxResults(1).build()); + assertEquals(1, procDao.findVirtualProcs(r).size()); + + /* + * Change the first result to 1, which should limit the result to 4. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaC = r.getCriteria(); + r.setCriteria(criteriaC.toBuilder().addShows("pipe").setFirstResult(2).build()); + r.addSort(new Sort("proc.ts_booked", Direction.ASC)); + assertEquals(4, procDao.findVirtualProcs(r).size()); + + /* + * Now try to do the equivalent of a limit/offset + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaD = r.getCriteria(); + r.setCriteria( + criteriaD.toBuilder().addShows("pipe").setFirstResult(3).addMaxResults(2).build()); + assertEquals(2, procDao.findVirtualProcs(r).size()); + + } + + @Test + @Transactional + @Rollback(true) + public void testVirtualProcWithSelfishService() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + frame.minCores = 250; + frame.threadable = true; + + // Frame from a non-selfish service + VirtualProc proc = VirtualProc.build(host, frame, "something-else"); + assertEquals(250, proc.coresReserved); + + // When no selfish service config is provided + proc = VirtualProc.build(host, frame); + assertEquals(250, proc.coresReserved); + + // Frame with a selfish service + proc = VirtualProc.build(host, frame, "shell", "something-else"); + assertEquals(800, proc.coresReserved); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java index 811cb129d..0172d007e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -41,199 +37,196 @@ import static org.junit.Assert.assertEquals; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ServiceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - ServiceDao serviceDao; - - @Test - @Transactional - @Rollback(true) - public void testGetService() { - ServiceEntity s1 = serviceDao.get("default"); - ServiceEntity s2 = serviceDao.get("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); - assertEquals(s1, s2); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.minMemoryIncrease = CueUtil.GB4; - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.minMemoryIncrease = CueUtil.GB; - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - - s.name = "smacktest"; - s.minCores = 200; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB8; - s.minGpuMemory = CueUtil.GB2; - s.threadable = true; - s.tags = Sets.newLinkedHashSet(); - s.tags.add("linux"); - s.minMemoryIncrease = CueUtil.GB4 + CueUtil.GB2; - - serviceDao.update(s); - ServiceEntity s1 = serviceDao.get(s.getId()); - - assertEquals(s.name, s1.name); - assertEquals(s.minCores, s1.minCores); - assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.threadable, s1.threadable); - assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); - assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - - serviceDao.delete(s); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM service WHERE pk_service=?", - Integer.class, s.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateServiceOverride() { - 
ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB2; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); - - s.name = "smacktest"; - s.minCores = 200; - s.timeout = 10; - s.timeout_llu = 10; - s.minMemory = CueUtil.GB8; - s.minGpuMemory = CueUtil.GB4; - s.threadable = true; - s.tags = Sets.newLinkedHashSet(); - s.tags.add("linux"); - s.minMemoryIncrease = CueUtil.GB4; - - serviceDao.update(s); - ServiceEntity s1 = serviceDao.getOverride(s.getId()); - - assertEquals(s.name, s1.name); - assertEquals(s.minCores, s1.minCores); - assertEquals(s.timeout, s1.timeout); - assertEquals(s.timeout_llu, s1.timeout_llu); - assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.minGpuMemory, s1.minGpuMemory); - assertEquals(s.threadable, s1.threadable); - assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); - assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); - assertEquals(s1.minMemoryIncrease, CueUtil.GB4); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); - serviceDao.delete(s); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM show_service WHERE pk_show_service=?", - Integer.class, s.getId())); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ServiceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + ServiceDao serviceDao; + + @Test + @Transactional + @Rollback(true) + public void testGetService() { + ServiceEntity s1 = serviceDao.get("default"); + ServiceEntity s2 = serviceDao.get("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); + assertEquals(s1, s2); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.minMemoryIncrease = CueUtil.GB4; + + serviceDao.insert(s); + assertEquals(s, serviceDao.get("dillweed")); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.minMemoryIncrease = CueUtil.GB; + + serviceDao.insert(s); + 
assertEquals(s, serviceDao.get("dillweed")); + + s.name = "smacktest"; + s.minCores = 200; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB8; + s.minGpuMemory = CueUtil.GB2; + s.threadable = true; + s.tags = Sets.newLinkedHashSet(); + s.tags.add("linux"); + s.minMemoryIncrease = CueUtil.GB4 + CueUtil.GB2; + + serviceDao.update(s); + ServiceEntity s1 = serviceDao.get(s.getId()); + + assertEquals(s.name, s1.name); + assertEquals(s.minCores, s1.minCores); + assertEquals(s.minMemory, s1.minMemory); + assertEquals(s.threadable, s1.threadable); + assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); + assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.get("dillweed")); + + serviceDao.delete(s); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM service WHERE pk_service=?", Integer.class, s.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertServiceOverride() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.getOverride("dillweed")); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateServiceOverride() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB2; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.getOverride("dillweed")); + assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); + + s.name = "smacktest"; + s.minCores = 200; + s.timeout = 10; + s.timeout_llu = 10; + s.minMemory = CueUtil.GB8; + s.minGpuMemory = CueUtil.GB4; + s.threadable = true; + s.tags = Sets.newLinkedHashSet(); + s.tags.add("linux"); + s.minMemoryIncrease = CueUtil.GB4; + + serviceDao.update(s); + ServiceEntity s1 = serviceDao.getOverride(s.getId()); + + assertEquals(s.name, s1.name); + assertEquals(s.minCores, s1.minCores); + assertEquals(s.timeout, s1.timeout); + assertEquals(s.timeout_llu, s1.timeout_llu); + assertEquals(s.minMemory, s1.minMemory); + assertEquals(s.minGpuMemory, s1.minGpuMemory); + assertEquals(s.threadable, s1.threadable); + assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); + assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); + assertEquals(s1.minMemoryIncrease, CueUtil.GB4); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteServiceOverride() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + 
s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.getOverride("dillweed")); + assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); + serviceDao.delete(s); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM show_service WHERE pk_show_service=?", Integer.class, s.getId())); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java index ea0ed67b8..99dea7b6a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -47,196 +43,169 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ShowDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - ShowDao showDao; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - private static String SHOW_ID = "00000000-0000-0000-0000-000000000000"; - private static String SHOW_NAME= "pipe"; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("general") - .setState(HardwareState.UP) - .setFacility("spi") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testFindShowDetail() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - assertEquals(SHOW_ID, show.id); - assertEquals(SHOW_NAME,show.name); - assertFalse(show.paused); - } - - @Test(expected=EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindShowDetailByHost() { - // TODO: Add code to setup a host and make the sow - // prefer the host, then check result again. - showDao.getShowDetail(createHost()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShowDetail() { - ShowEntity show = showDao.getShowDetail(SHOW_ID); - assertEquals(SHOW_ID, show.id); - assertEquals(SHOW_NAME,show.name); - assertFalse(show.paused); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertShow() { - ShowEntity show = new ShowEntity(); - show.name = "uber"; - showDao.insertShow(show); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT count(*) FROM show where str_name=?", - Integer.class, show.name)); - - ShowEntity newShow = showDao.findShowDetail(show.name); - assertEquals(newShow.id, show.id); - assertEquals(newShow.name,show.name); - assertFalse(show.paused); - } - - @Test - @Transactional - @Rollback(true) - public void testShowExists() { - assertFalse(showDao.showExists("uber")); - assertTrue(showDao.showExists("pipe")); - assertTrue(showDao.showExists("fx")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowDefaultMinCores() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowDefaultMinCores(show, 100); - assertTrue(jdbcTemplate.queryForObject( - "SELECT int_default_min_cores FROM show WHERE pk_show=?", - Integer.class, show.id) == 100); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowDefaultMaxCores() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowDefaultMaxCores(show, 1000); - assertTrue(jdbcTemplate.queryForObject( - "SELECT int_default_max_cores FROM show WHERE pk_show=?", - Integer.class, show.id) == 1000); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowCommentEmail() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowCommentEmail(show, new String[] {"test@imageworks.com"}); - String email = jdbcTemplate.queryForObject( - "SELECT str_comment_email FROM show WHERE pk_show=?", - String.class, show.id); - assertEquals("test@imageworks.com", email); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateBookingEnabled() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateBookingEnabled(show,false); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_booking_enabled FROM show WHERE pk_show=?", - Boolean.class, show.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateActive() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - 
showDao.updateActive(show, false); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_active FROM show WHERE pk_show=?", - Boolean.class, show.id)); - showDao.updateActive(show, true); - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_active FROM show WHERE pk_show=?", - Boolean.class, show.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameCounters() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - int frameSuccess = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", - Integer.class, show.id); - showDao.updateFrameCounters(show, 0); - int frameSucces2 = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", - Integer.class, show.id); - assertEquals(frameSuccess + 1,frameSucces2); - - int frameFail= jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", - Integer.class, show.id); - showDao.updateFrameCounters(show, 1); - int frameFail2 = jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", - Integer.class, show.id); - assertEquals(frameFail+ 1,frameFail2); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ShowDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + ShowDao showDao; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + private static String SHOW_ID = "00000000-0000-0000-0000-000000000000"; + private static String SHOW_NAME = "pipe"; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("general") + .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + @Test + @Transactional + @Rollback(true) + public void testFindShowDetail() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + assertEquals(SHOW_ID, show.id); + assertEquals(SHOW_NAME, show.name); + assertFalse(show.paused); + } + + @Test(expected = EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void testFindShowDetailByHost() { + // TODO: Add code to setup a host and make the show + // prefer the host, then check result again. 
+ showDao.getShowDetail(createHost()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetShowDetail() { + ShowEntity show = showDao.getShowDetail(SHOW_ID); + assertEquals(SHOW_ID, show.id); + assertEquals(SHOW_NAME, show.name); + assertFalse(show.paused); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertShow() { + ShowEntity show = new ShowEntity(); + show.name = "uber"; + showDao.insertShow(show); + + assertEquals(Integer.valueOf(1), jdbcTemplate + .queryForObject("SELECT count(*) FROM show where str_name=?", Integer.class, show.name)); + + ShowEntity newShow = showDao.findShowDetail(show.name); + assertEquals(newShow.id, show.id); + assertEquals(newShow.name, show.name); + assertFalse(show.paused); + } + + @Test + @Transactional + @Rollback(true) + public void testShowExists() { + assertFalse(showDao.showExists("uber")); + assertTrue(showDao.showExists("pipe")); + assertTrue(showDao.showExists("fx")); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShowDefaultMinCores() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateShowDefaultMinCores(show, 100); + assertTrue(jdbcTemplate.queryForObject("SELECT int_default_min_cores FROM show WHERE pk_show=?", + Integer.class, show.id) == 100); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShowDefaultMaxCores() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateShowDefaultMaxCores(show, 1000); + assertTrue(jdbcTemplate.queryForObject("SELECT int_default_max_cores FROM show WHERE pk_show=?", + Integer.class, show.id) == 1000); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShowCommentEmail() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateShowCommentEmail(show, new String[] {"test@imageworks.com"}); + String email = jdbcTemplate.queryForObject("SELECT str_comment_email FROM show WHERE pk_show=?", + String.class, show.id); + assertEquals("test@imageworks.com", email); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateBookingEnabled() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateBookingEnabled(show, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_booking_enabled FROM show WHERE pk_show=?", + Boolean.class, show.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateActive() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateActive(show, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_active FROM show WHERE pk_show=?", + Boolean.class, show.id)); + showDao.updateActive(show, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_active FROM show WHERE pk_show=?", + Boolean.class, show.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameCounters() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + int frameSuccess = jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); + showDao.updateFrameCounters(show, 0); + int frameSucces2 = jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); + assertEquals(frameSuccess + 1, frameSucces2); + + int frameFail = jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); + showDao.updateFrameCounters(show, 1); + int frameFail2 = jdbcTemplate.queryForObject( + "SELECT 
int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); + assertEquals(frameFail + 1, frameFail2); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java index dfe1bd175..92fd02ce7 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.dao.postgres; import javax.annotation.Resource; @@ -47,216 +44,203 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class SubscriptionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - AllocationDao allocDao; - - @Resource - SubscriptionDao subscriptionDao; - - @Resource - AllocationDao allocationDao; - - @Resource - ShowDao showDao; - - @Resource - FacilityDao facilityDao; - - public static final String SUB_NAME = "test.pipe"; - public static final String ALLOC_NAME = "test"; - - private AllocationEntity alloc; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public SubscriptionEntity buildSubscription(ShowInterface t, AllocationInterface a) { - SubscriptionEntity s = new SubscriptionEntity(); - s.allocationId = a.getId(); - s.showId = t.getId(); - s.burst = 500; - s.size = 100; - return s; - } - - public AllocationEntity buildAllocation() { - AllocationEntity a = new AllocationEntity(); - a.tag = "test"; - a.name = ALLOC_NAME; - a.facilityId = facilityDao.getDefaultFacility().getFacilityId(); - return a; - } - - @Before - public void before() { - alloc = new AllocationEntity(); - alloc.name = ALLOC_NAME; - alloc.tag = "test"; - allocationDao.insertAllocation( - facilityDao.getDefaultFacility(), alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testHasRunningProcs() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - 
assertFalse(subscriptionDao.hasRunningProcs(s)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowOverSize() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - - assertFalse(this.subscriptionDao.isShowOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 100, sub.getSubscriptionId()); - - assertFalse(subscriptionDao.isShowOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 101, sub.getSubscriptionId()); - - assertEquals(true, subscriptionDao.isShowOverSize(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowAtOrOverSize() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - assertFalse(this.subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 100, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 200, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowOverBurst() { - subscriptionDao.insertSubscription(buildSubscription(getShow(), alloc)); - - // Burst is 500 so 600 would be over burst. - assertTrue(subscriptionDao.isShowOverBurst(getShow(), alloc, 600)); - - // Burst is 500 so 300 would be under burst. - assertFalse(subscriptionDao.isShowOverBurst(getShow(), alloc, 300)); - } - - @Test(expected=org.springframework.jdbc.UncategorizedSQLException.class) - @Transactional - @Rollback(true) - public void testIsShowAtOrOverBurst() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - assertFalse(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 500, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? 
WHERE pk_subscription = ?", - 501, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionDetail() { - - FacilityInterface f = facilityDao.getDefaultFacility(); - - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - assertNotNull(s.id); - assertNotNull(s.getId()); - - SubscriptionEntity s1 = subscriptionDao.getSubscriptionDetail( - s.getSubscriptionId()); - - assertEquals(alloc.getName() + ".pipe", s1.name); - assertEquals(s.burst, s1.burst); - assertEquals(s.id, s1.id); - assertEquals(s.size, s1.size); - assertEquals(s.allocationId, s1.allocationId); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertSubscription() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteSubscription() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.deleteSubscription(s); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSubscriptionSize() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.updateSubscriptionSize(s, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_size FROM subscription WHERE pk_subscription=?", - Integer.class, s.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSubscriptionBurst() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.updateSubscriptionBurst(s, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_burst FROM subscription WHERE pk_subscription=?", - Integer.class, s.getId())); - } -} +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class SubscriptionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + AllocationDao allocDao; + + @Resource + SubscriptionDao subscriptionDao; + + @Resource + AllocationDao allocationDao; + + @Resource + ShowDao showDao; + + @Resource + FacilityDao facilityDao; + public static final String SUB_NAME = "test.pipe"; + public static final String ALLOC_NAME = "test"; + private AllocationEntity alloc; + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public SubscriptionEntity buildSubscription(ShowInterface t, AllocationInterface a) { + SubscriptionEntity s = new SubscriptionEntity(); + s.allocationId = a.getId(); + s.showId = t.getId(); + s.burst = 500; + s.size = 100; + return s; + } + + public AllocationEntity buildAllocation() { + AllocationEntity a = new AllocationEntity(); + a.tag = "test"; + a.name = ALLOC_NAME; + a.facilityId = facilityDao.getDefaultFacility().getFacilityId(); + return a; + } + + @Before + public void before() { + alloc = new AllocationEntity(); + alloc.name = ALLOC_NAME; + alloc.tag = "test"; + allocationDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); + } + + @Test + @Transactional + @Rollback(true) + public void testHasRunningProcs() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + 
subscriptionDao.insertSubscription(s); + assertFalse(subscriptionDao.hasRunningProcs(s)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsShowOverSize() { + + SubscriptionEntity sub = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(sub); + + assertFalse(this.subscriptionDao.isShowOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 100, + sub.getSubscriptionId()); + + assertFalse(subscriptionDao.isShowOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 101, + sub.getSubscriptionId()); + + assertEquals(true, subscriptionDao.isShowOverSize(getShow(), alloc)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsShowAtOrOverSize() { + + SubscriptionEntity sub = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(sub); + assertFalse(this.subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 100, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 200, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsShowOverBurst() { + subscriptionDao.insertSubscription(buildSubscription(getShow(), alloc)); + + // Burst is 500 so 600 would be over burst. + assertTrue(subscriptionDao.isShowOverBurst(getShow(), alloc, 600)); + + // Burst is 500 so 300 would be under burst. + assertFalse(subscriptionDao.isShowOverBurst(getShow(), alloc, 300)); + } + + @Test(expected = org.springframework.jdbc.UncategorizedSQLException.class) + @Transactional + @Rollback(true) + public void testIsShowAtOrOverBurst() { + + SubscriptionEntity sub = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(sub); + assertFalse(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 500, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? 
WHERE pk_subscription = ?", 501, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetSubscriptionDetail() { + + FacilityInterface f = facilityDao.getDefaultFacility(); + + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + assertNotNull(s.id); + assertNotNull(s.getId()); + + SubscriptionEntity s1 = subscriptionDao.getSubscriptionDetail(s.getSubscriptionId()); + + assertEquals(alloc.getName() + ".pipe", s1.name); + assertEquals(s.burst, s1.burst); + assertEquals(s.id, s1.id); + assertEquals(s.size, s1.size); + assertEquals(s.allocationId, s1.allocationId); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertSubscription() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteSubscription() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + subscriptionDao.deleteSubscription(s); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSubscriptionSize() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + subscriptionDao.updateSubscriptionSize(s, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_size FROM subscription WHERE pk_subscription=?", Integer.class, s.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSubscriptionBurst() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + subscriptionDao.updateSubscriptionBurst(s, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_burst FROM subscription WHERE pk_subscription=?", Integer.class, s.getId())); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java index 6f760f212..60f6c911c 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -48,241 +44,217 @@ import static org.junit.Assert.assertFalse; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class TaskDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - TaskDao taskDao; - - @Resource - PointDao pointDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - @Test - @Transactional - @Rollback(true) - public void insertTask() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - String dept = jdbcTemplate.queryForObject( - "SELECT pk_dept FROM job WHERE pk_job=?", String.class, job.getJobId()); - - // Add in a new task, the job should switch to using this task. - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDepartment(dept)); - - TaskEntity t = new TaskEntity(p, "dev.foo", 100); - taskDao.insertTask(t); - - t = taskDao.getTaskDetail(t.id); - taskDao.deleteTask(t); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTask() { - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - TaskEntity t = new TaskEntity(p, "dev.cue", 100); - taskDao.insertTask(t); - taskDao.deleteTask(t); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTasksByShowAndDepartment() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - int task_count = jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - - assertEquals(Integer.valueOf(task_count + 1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId())); - - taskDao.deleteTasks(p); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTasksByDepartmentConfig() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, - "dev.cue"); - t.minCoreUnits = 100; - taskDao.insertTask(t); - - taskDao.deleteTasks(p); - - /** - * This is always - */ - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void getTaskDetail() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - 
assertEquals(newTask.id,t.id); - } - - @Test - @Transactional - @Rollback(true) - public void getTaskDetailByDept() { - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(departmentDao.getDefaultDepartment(), "dev.cue"); - assertEquals(newTask.id,t.id); - } - - @Test - @Transactional - @Rollback(true) - public void updateTaskMinProcs() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - t.minCoreUnits = 100; - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - taskDao.updateTaskMinCores(newTask, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void adjustTaskMinProcs() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p,"dev.cue"); - t.minCoreUnits = 10; - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - taskDao.updateTaskMinCores(newTask, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - - taskDao.adjustTaskMinCores(t, 105); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - assertEquals(Integer.valueOf(5), jdbcTemplate.queryForObject( - "SELECT int_adjust_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - - taskDao.adjustTaskMinCores(t, 50); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - assertEquals(Integer.valueOf(-50), jdbcTemplate.queryForObject( - "SELECT int_adjust_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - } - - - @Test - @Transactional - @Rollback(true) - public void mergeTask() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, t.getTaskId())); - - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - newTask.minCoreUnits = 200; - taskDao.mergeTask(newTask); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void isJobManaged() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - assertFalse(taskDao.isManaged(job)); - } -} + @Resource + ShowDao showDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + TaskDao taskDao; + + @Resource + PointDao pointDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + @Before 
+ public void testMode() { + jobLauncher.testMode = true; + } + + @Test + @Transactional + @Rollback(true) + public void insertTask() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + String dept = jdbcTemplate.queryForObject("SELECT pk_dept FROM job WHERE pk_job=?", + String.class, job.getJobId()); + + // Add in a new task, the job should switch to using this task. + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDepartment(dept)); + + TaskEntity t = new TaskEntity(p, "dev.foo", 100); + taskDao.insertTask(t); + + t = taskDao.getTaskDetail(t.id); + taskDao.deleteTask(t); + } + + @Test + @Transactional + @Rollback(true) + public void deleteTask() { + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + TaskEntity t = new TaskEntity(p, "dev.cue", 100); + taskDao.insertTask(t); + taskDao.deleteTask(t); + } + + @Test + @Transactional + @Rollback(true) + public void deleteTasksByShowAndDepartment() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + int task_count = jdbcTemplate.queryForObject("SELECT COUNT(*) FROM task WHERE pk_point=?", + Integer.class, p.getPointId()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + taskDao.insertTask(t); + + assertEquals(Integer.valueOf(task_count + 1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); + + taskDao.deleteTasks(p); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); + } + + @Test + @Transactional + @Rollback(true) + public void deleteTasksByDepartmentConfig() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + t.minCoreUnits = 100; + taskDao.insertTask(t); + + taskDao.deleteTasks(p); + + /** + * This is always + */ + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); + } + + @Test + @Transactional + @Rollback(true) + public void getTaskDetail() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + assertEquals(newTask.id, t.id); + } + + @Test + @Transactional + @Rollback(true) + public void getTaskDetailByDept() { + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(departmentDao.getDefaultDepartment(), "dev.cue"); + assertEquals(newTask.id, t.id); + } + + @Test + @Transactional + @Rollback(true) + public void updateTaskMinProcs() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + t.minCoreUnits = 100; + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + taskDao.updateTaskMinCores(newTask, 100); + 
assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + } + + @Test + @Transactional + @Rollback(true) + public void adjustTaskMinProcs() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + t.minCoreUnits = 10; + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + taskDao.updateTaskMinCores(newTask, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + + taskDao.adjustTaskMinCores(t, 105); + + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + assertEquals(Integer.valueOf(5), jdbcTemplate.queryForObject( + "SELECT int_adjust_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + + taskDao.adjustTaskMinCores(t, 50); + + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + assertEquals(Integer.valueOf(-50), jdbcTemplate.queryForObject( + "SELECT int_adjust_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + } + + @Test + @Transactional + @Rollback(true) + public void mergeTask() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + taskDao.insertTask(t); + + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, t.getTaskId())); + + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + newTask.minCoreUnits = 200; + taskDao.mergeTask(newTask); + + assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); + } + + @Test + @Transactional + @Rollback(true) + public void isJobManaged() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + assertFalse(taskDao.isManaged(job)); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java index b8368e1f8..70109b7f8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dao.postgres; import java.io.File; @@ -126,1241 +122,1218 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class WhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - AllocationDao allocationDao; - - @Resource - HostDao hostDao; - - @Resource - WhiteboardDao whiteboardDao; - - @Resource - ShowDao showDao; - - @Resource - FilterDao filterDao; - - @Resource - ProcDao procDao; - - @Resource - MatcherDao matcherDao; - - @Resource - ActionDao actionDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - GroupDao groupDao; - - @Resource - LayerDao layerDao; - - @Resource - LimitDao limitDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - PointDao pointDao; - - @Resource - HostManager hostManager; - - @Resource - CommentManager commentManager; - - @Resource - DepartmentManager departmentManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - OwnerManager ownerManager; - - @Resource - BookingManager bookingManager; - - @Resource - ServiceManager serviceManager; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - HostSearchFactory hostSearchFactory; - - @Resource - JobSearchFactory jobSearchFactory; - - @Resource - ProcSearchFactory procSearchFactory; - - private static final String HOST = "testest"; - private static final String SHOW = "pipe"; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public ShowEntity getShow() { - return showDao.findShowDetail(SHOW); - } - - public FilterEntity createFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = "Default"; - filter.showId = getShow().id; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - filterDao.insertFilter(filter); - return filter; - } - - public MatcherEntity createMatcher(FilterEntity f) { - MatcherEntity matcher = new MatcherEntity(); - matcher.filterId = f.id; - matcher.name = null; - matcher.showId = getShow().getId(); - matcher.subject = MatchSubject.JOB_NAME; - matcher.type = MatchType.CONTAINS; - matcher.value = "testuser"; - matcherDao.insertMatcher(matcher); - return matcher; - } - - public ActionEntity createAction(FilterEntity f) { - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.booleanValue = true; - a1.name = 
null; - a1.valueType = ActionValueType.BOOLEAN_TYPE; - actionDao.createAction(a1); - return a1; - } - - public RenderHost getRenderHost() { - // Hardcoded value of dispatcher.memory.mem_reserved_min - // to avoid having to read opencue.properties on a test setting - long memReservedMin = 262144; - RenderHost host = RenderHost.newBuilder() - .setName(HOST) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem((int) memReservedMin * 4) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) memReservedMin * 4) - .setTotalSwap(2096) - .setNimbyEnabled(true) - .setNumProcs(2) - .setCoresPerProc(400) - .setState(HardwareState.DOWN) - .setFacility("spi") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - return host; - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - public JobDetail launchLimitJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_limit.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - private void createTestLimits() { - limitDao.createLimit("util", 15); - limitDao.createLimit("arnold", 20); - } - - @Test - @Transactional - @Rollback(true) - public void getService() { - whiteboardDao.getService("arnold"); - } - - @Test - @Transactional - @Rollback(true) - public void getServices() { - whiteboardDao.getDefaultServices(); - } - - @Test - @Transactional - @Rollback(true) - public void getServiceOverride() { - - ShowEntity show = getShow(); - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "test"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = 320000; - s.tags.add("general"); - s.threadable = false; - s.showId = show.getId(); - s.minMemoryIncrease = CueUtil.GB4; - - serviceManager.createService(s); - whiteboardDao.getServiceOverride(getShow(), "test"); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class WhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Test - @Transactional - @Rollback(true) - public void getServiceOverrides() { - whiteboardDao.getServiceOverrides(getShow()); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Test - @Transactional - @Rollback(true) - public void testGetDepend() { + @Resource + AllocationDao allocationDao; - List depends = dependManager.getWhatDependsOn(launchJob()); - for (LightweightDependency depend: depends) { - whiteboardDao.getDepend(depend); - } - } + @Resource + HostDao hostDao; - @Test - @Transactional - @Rollback(true) - public void testGetDependById() { + @Resource + WhiteboardDao whiteboardDao; - List depends = dependManager.getWhatDependsOn(launchJob()); - for (LightweightDependency depend: depends) { - whiteboardDao.getDepend(depend); - whiteboardDao.getDepend(depend.id); - } - } + @Resource + ShowDao showDao; - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnThis() { - JobDetail job = launchJob(); - assertEquals(1,whiteboardDao.getWhatDependsOnThis(job).getDependsCount()); + @Resource + FilterDao filterDao; - LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); - assertEquals(0, 
whiteboardDao.getWhatDependsOnThis(layer1).getDependsCount()); + @Resource + ProcDao procDao; - LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); - assertEquals(1, whiteboardDao.getWhatDependsOnThis(layer2).getDependsCount()); + @Resource + MatcherDao matcherDao; - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(0, whiteboardDao.getWhatDependsOnThis(frame).getDependsCount()); - } + @Resource + ActionDao actionDao; - @Test - @Transactional - @Rollback(true) - public void testGetWhatThisDependsOn() { - JobDetail job = launchJob(); - assertEquals(0, whiteboardDao.getWhatThisDependsOn(job).getDependsCount()); + @Resource + JobManager jobManager; - LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); - assertEquals(1, whiteboardDao.getWhatThisDependsOn(layer1).getDependsCount()); + @Resource + JobLauncher jobLauncher; - LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); - assertEquals(0, whiteboardDao.getWhatThisDependsOn(layer2).getDependsCount()); + @Resource + GroupDao groupDao; - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(1, whiteboardDao.getWhatThisDependsOn(frame).getDependsCount()); - } + @Resource + LayerDao layerDao; - @Test - @Transactional - @Rollback(true) - public void testGetDepends() { - JobDetail job = launchJob(); - assertEquals(1,whiteboardDao.getDepends(job).getDependsCount()); - } + @Resource + LimitDao limitDao; - @Test - @Transactional - @Rollback(true) - public void testGetCommentsOnJob() { - JobDetail job = launchJob(); - assertEquals(0,whiteboardDao.getComments(job).getCommentsCount()); - } + @Resource + DepartmentDao departmentDao; - @Test - @Transactional - @Rollback(true) - public void testGetCommentsOnHost() { + @Resource + DependManager dependManager; + + @Resource + FrameDao frameDao; - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); + @Resource + PointDao pointDao; + + @Resource + HostManager hostManager; + + @Resource + CommentManager commentManager; + + @Resource + DepartmentManager departmentManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + OwnerManager ownerManager; + + @Resource + BookingManager bookingManager; + + @Resource + ServiceManager serviceManager; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + HostSearchFactory hostSearchFactory; + + @Resource + JobSearchFactory jobSearchFactory; + + @Resource + ProcSearchFactory procSearchFactory; + + private static final String HOST = "testest"; + private static final String SHOW = "pipe"; + + @Before + public void testMode() { + jobLauncher.testMode = true; + } + + public ShowEntity getShow() { + return showDao.findShowDetail(SHOW); + } + + public FilterEntity createFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = "Default"; + filter.showId = getShow().id; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + filterDao.insertFilter(filter); + return filter; + } + + public MatcherEntity createMatcher(FilterEntity f) { + MatcherEntity matcher = new MatcherEntity(); + matcher.filterId = f.id; + matcher.name = null; + matcher.showId = getShow().getId(); + matcher.subject = MatchSubject.JOB_NAME; + matcher.type = MatchType.CONTAINS; + matcher.value = "testuser"; + matcherDao.insertMatcher(matcher); + return matcher; + } + + public ActionEntity createAction(FilterEntity f) { + ActionEntity 
a1 = new ActionEntity(); + a1.type = ActionType.PAUSE_JOB; + a1.filterId = f.getFilterId(); + a1.booleanValue = true; + a1.name = null; + a1.valueType = ActionValueType.BOOLEAN_TYPE; + actionDao.createAction(a1); + return a1; + } + + public RenderHost getRenderHost() { + // Hardcoded value of dispatcher.memory.mem_reserved_min + // to avoid having to read opencue.properties on a test setting + long memReservedMin = 262144; + RenderHost host = RenderHost.newBuilder().setName(HOST).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) memReservedMin * 4).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) memReservedMin * 4).setTotalSwap(2096) + .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(400).setState(HardwareState.DOWN) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512) + .build(); + return host; + } + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + public JobDetail launchLimitJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_limit.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + private void createTestLimits() { + limitDao.createLimit("util", 15); + limitDao.createLimit("arnold", 20); + } + + @Test + @Transactional + @Rollback(true) + public void getService() { + whiteboardDao.getService("arnold"); + } + + @Test + @Transactional + @Rollback(true) + public void getServices() { + whiteboardDao.getDefaultServices(); + } + + @Test + @Transactional + @Rollback(true) + public void getServiceOverride() { + + ShowEntity show = getShow(); + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "test"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = 320000; + s.tags.add("general"); + s.threadable = false; + s.showId = show.getId(); + s.minMemoryIncrease = CueUtil.GB4; + + serviceManager.createService(s); + whiteboardDao.getServiceOverride(getShow(), "test"); + } + + @Test + @Transactional + @Rollback(true) + public void getServiceOverrides() { + whiteboardDao.getServiceOverrides(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepend() { + + List depends = dependManager.getWhatDependsOn(launchJob()); + for (LightweightDependency depend : depends) { + whiteboardDao.getDepend(depend); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGetDependById() { + + List depends = dependManager.getWhatDependsOn(launchJob()); + for (LightweightDependency depend : depends) { + whiteboardDao.getDepend(depend); + whiteboardDao.getDepend(depend.id); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnThis() { + JobDetail job = launchJob(); + assertEquals(1, whiteboardDao.getWhatDependsOnThis(job).getDependsCount()); + + LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); + assertEquals(0, whiteboardDao.getWhatDependsOnThis(layer1).getDependsCount()); + + LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); + assertEquals(1, whiteboardDao.getWhatDependsOnThis(layer2).getDependsCount()); + + FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); + assertEquals(0, whiteboardDao.getWhatDependsOnThis(frame).getDependsCount()); + } + + 
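Read together, the assertions in this test and in testGetWhatThisDependsOn just below describe one dependency edge viewed from each end: in the test jobspec, the pass_1 layer appears to wait on pass_1_preprocess. A minimal sketch of that pairing as a helper inside this test class, reusing its injected layerDao and whiteboardDao and only calls that already appear in these tests (the helper name is hypothetical, and the edge is inferred from the assertions rather than read from the jobspec itself):

    // Hypothetical helper: the same depend edge counted from both directions.
    // Inferred from the surrounding tests: "pass_1" depends on "pass_1_preprocess".
    private void assertSingleLayerDependEdge(JobDetail job) {
        LayerInterface waitedOn = layerDao.findLayer(job, "pass_1_preprocess");
        LayerInterface waiting = layerDao.findLayer(job, "pass_1");
        assertEquals(1, whiteboardDao.getWhatDependsOnThis(waitedOn).getDependsCount());
        assertEquals(1, whiteboardDao.getWhatThisDependsOn(waiting).getDependsCount());
    }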
@Test + @Transactional + @Rollback(true) + public void testGetWhatThisDependsOn() { + JobDetail job = launchJob(); + assertEquals(0, whiteboardDao.getWhatThisDependsOn(job).getDependsCount()); + + LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); + assertEquals(1, whiteboardDao.getWhatThisDependsOn(layer1).getDependsCount()); + + LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); + assertEquals(0, whiteboardDao.getWhatThisDependsOn(layer2).getDependsCount()); + + FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); + assertEquals(1, whiteboardDao.getWhatThisDependsOn(frame).getDependsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepends() { + JobDetail job = launchJob(); + assertEquals(1, whiteboardDao.getDepends(job).getDependsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCommentsOnJob() { + JobDetail job = launchJob(); + assertEquals(0, whiteboardDao.getComments(job).getCommentsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCommentsOnHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); + + CommentDetail c = new CommentDetail(); + c.message = "you suck"; + c.subject = "a useful message"; + c.user = "testuser"; + c.timestamp = null; + + commentManager.addComment(hd, c); + assertEquals(1, whiteboardDao.getComments(hd).getCommentsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFilter() { + createFilter(); + whiteboardDao.findFilter(getShow(), "Default"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilter() { + whiteboardDao.getFilter(createFilter()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatchers() { + FilterEntity f = createFilter(); + createMatcher(f); + whiteboardDao.getMatchers(f); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatcher() { + FilterEntity f = createFilter(); + MatcherEntity m = createMatcher(f); + whiteboardDao.getMatcher(m); + } + + @Test + @Transactional + @Rollback(true) + public void testGetActions() { + FilterEntity f = createFilter(); + createAction(f); + whiteboardDao.getActions(f); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAction() { + FilterEntity f = createFilter(); + whiteboardDao.getAction(createAction(f)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilters() { + createFilter(); + whiteboardDao.getFilters(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFramesByFrameSearch() { + JobEntity job = launchJob(); + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().setPage(1).setLimit(5).addLayers("pass_1").build()); + assertEquals(5, whiteboardDao.getFrames(r).getFramesCount()); + for (Frame f : whiteboardDao.getFrames(r).getFramesList()) { + assertEquals(f.getLayerName(), "pass_1"); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayers() { + JobDetail job = launchJob(); + whiteboardDao.getLayers(job); + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName 
= host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); + dispatcher.setTestMode(true); + dispatcher.dispatch(dframe, proc); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + dframe = frameDao.getDispatchFrame(frame.getId()); + + assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0)); + dispatchSupport.updateUsageCounters(frame, 0); + whiteboardDao.getLayers(job); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimits() { + createTestLimits(); + List limits = whiteboardDao.getLimits(); + assertEquals(limits.size(), 2); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerLimits() { + createTestLimits(); + JobDetail job = launchLimitJob(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + List limits = whiteboardDao.getLimits(layer); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0).getName(), "arnold"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimit() { + String limitName = "testing"; + int limitMaxValue = 20; + String limitId = limitDao.createLimit(limitName, limitMaxValue); + Limit limit = whiteboardDao.getLimit(limitId); + assertEquals(limit.getName(), limitName); + assertEquals(limit.getMaxValue(), limitMaxValue); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLimit() { + String limitName = "testing"; + int limitMaxValue = 20; + String limitId = limitDao.createLimit(limitName, limitMaxValue); + Limit limit = whiteboardDao.findLimit(limitName); + assertEquals(limit.getName(), limitName); + assertEquals(limit.getMaxValue(), limitMaxValue); + assertEquals(limit.getId(), limitId); + } + + @Test + @Transactional + @Rollback(true) + public void testStopFrameUpdatesLayerMaxRSS() { + long max_rss = 123456L; + + JobDetail job = launchJob(); + whiteboardDao.getLayers(job); + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); + dispatcher.setTestMode(true); + dispatcher.dispatch(dframe, proc); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + dframe = frameDao.getDispatchFrame(frame.getId()); + + // Note use of 4-arg stopFrame here to update max rss. 
+ assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); + dispatchSupport.updateUsageCounters(frame, 0); + Layer layer = whiteboardDao.getLayer(frame.layerId); + assertEquals(max_rss, layer.getLayerStats().getMaxRss()); + } + + @Test + @Transactional + @Rollback(true) + public void testStopFrameUpdatesJobMaxRSS() { + long max_rss = 123456L; + + JobDetail job = launchJob(); + whiteboardDao.getLayers(job); + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); + dispatcher.setTestMode(true); + dispatcher.dispatch(dframe, proc); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + dframe = frameDao.getDispatchFrame(frame.getId()); + + // Note use of 4-arg stopFrame here to update max rss. + assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); + dispatchSupport.updateUsageCounters(frame, 0); + Job grpc_job = whiteboardDao.getJob(job.id); + assertEquals(max_rss, grpc_job.getJobStats().getMaxRss()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobs() { + launchJob(); + JobSearchCriteria r = JobSearchInterface.criteriaFactory(); + r = r.toBuilder().addShows("pipe").build(); + whiteboardDao.getJobs(jobSearchFactory.create(r)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobNames() { + launchJob(); + JobSearchCriteria r = JobSearchInterface.criteriaFactory(); + r = r.toBuilder().addShows("pipe").build(); + whiteboardDao.getJobNames(jobSearchFactory.create(r)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetUpdatedFrames() { + final JobDetail job = launchJob(); + List jobs = new ArrayList(); + + jobs.add(new JobInterface() { + public String getJobId() { + return job.getId(); + } + + public String getShowId() { + return null; + } + + public String getId() { + return job.getId(); + } + + public String getName() { + return null; + } + + public String getFacilityId() { + throw new RuntimeException("not implemented"); + } + }); + + whiteboardDao.getUpdatedFrames(job, new ArrayList(), + (int) (System.currentTimeMillis() / 1000)); + + } + + @Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testGetUpdatedFramesFailure() { + final JobDetail job = launchJob(); + List jobs = new ArrayList(); + + jobs.add(new JobInterface() { + public String getJobId() { + return job.getId(); + } + + public String getShowId() { + return null; + } + + public String getId() { + return job.getId(); + } + + public String getName() { + return null; + } + + public String getFacilityId() { + throw new RuntimeException("not implemented"); + } + }); + + // this one should fail + whiteboardDao.getUpdatedFrames(job, new ArrayList(), + (int) (System.currentTimeMillis() / 1000 - 1000000)); + } + + @Test + @Transactional + @Rollback(true) + public void testFindJob() { + JobDetail job = launchJob(); + whiteboardDao.findJob(job.name); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJob() { + JobDetail job = launchJob(); + whiteboardDao.getJob(job.id); + } + + 
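The two stopFrame tests above exercise the same propagation path: the four-argument stopFrame carries the frame's max RSS, updateUsageCounters rolls it up, and the value then surfaces on both the layer and the job through the whiteboard. A minimal sketch of a shared assertion helper, assuming it lives inside this test class and reuses its injected whiteboardDao (the helper name and signature are hypothetical, not part of this patch):

    // Hypothetical helper: a frame's reported max RSS should be visible at both levels.
    private void assertMaxRssPropagated(FrameDetail frame, JobDetail job, long maxRss) {
        assertEquals(maxRss, whiteboardDao.getLayer(frame.layerId).getLayerStats().getMaxRss());
        assertEquals(maxRss, whiteboardDao.getJob(job.id).getJobStats().getMaxRss());
    }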
@Test + @Transactional + @Rollback(true) + public void testGetSubscriptionByID() { + whiteboardDao.getSubscription("00000000-0000-0000-0000-000000000001"); + } + + @Test + @Transactional + @Rollback(true) + public void findFindSubscription() { + whiteboardDao.findSubscription("pipe", "spi.general"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetSubscriptions() { + whiteboardDao.getSubscriptions(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetSubscriptionsByAllocation() { + whiteboardDao.getSubscriptions(allocationDao.findAllocationEntity("spi", "general")); + } + + @Test + @Transactional + @Rollback(true) + public void testGetShow() { + whiteboardDao.getShow(getShow().id); + } + + @Test + @Transactional + @Rollback(true) + public void testFindShow() { + whiteboardDao.findShow(getShow().name); + } + + @Test + @Transactional + @Rollback(true) + public void testGetShows() { + whiteboardDao.getShows(); + } + + @Test + @Transactional + @Rollback(true) + public void testGetActiveShows() { + whiteboardDao.getActiveShows(); + } + + @Test + @Transactional + @Rollback(true) + public void testFindHost() { + + try { + HostEntity h = hostManager.findHostDetail(HOST); + hostManager.deleteHost(h); + } catch (Exception e) { + } + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); + Host h = whiteboardDao.findHost(host.getName()); + assertEquals(host.getName(), h.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetHosts() { + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + HostSearchCriteria h = HostSearchInterface.criteriaFactory(); + h = h.toBuilder().addHosts(HOST).build(); + assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetHostsByAllocation() { + RenderHost host = getRenderHost(); + AllocationEntity alloc = + allocationDao.getAllocationEntity("00000000-0000-0000-0000-000000000006"); + DispatchHost hd = hostManager.createHost(host, alloc); + + HostSearchCriteria h = HostSearchInterface.criteriaFactory(); + h = h.toBuilder().addAllocs(alloc.getName()).build(); + assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAllocation() { + whiteboardDao.getAllocation("00000000-0000-0000-0000-000000000000"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindAllocation() { + whiteboardDao.findAllocation("spi.general"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAllocations() { + whiteboardDao.getAllocations(); + } + + @Test + @Transactional + @Rollback(true) + public void testGetRootGroup() { + whiteboardDao.getRootGroup(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroup() { + whiteboardDao.getGroup("A0000000-0000-0000-0000-000000000000"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroups() { + whiteboardDao.getGroups(getShow()); + whiteboardDao.getGroup(groupDao.getRootGroupId(getShow())); + whiteboardDao.getGroups(groupDao.getRootGroupDetail(getShow())); + } + + @Test + @Transactional + @Rollback(true) + public void testFindGroup() { + whiteboardDao.findGroup("pipe", "pipe"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrame() { + JobDetail 
job = launchJob(); + whiteboardDao.findFrame(job.name, "pass_1", 1); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFilterByName() { + createFilter(); + whiteboardDao.findFilter("pipe", "Default"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLayer() { + JobDetail job = launchJob(); + whiteboardDao.findLayer(job.name, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepartment() { + ShowInterface show = showDao.findShowDetail("pipe"); + DepartmentInterface dept = departmentDao.getDefaultDepartment(); + + Department d = whiteboardDao.getDepartment(show, dept.getName()); + + assertEquals("pipe.Unknown", d.getName()); + assertEquals("Unknown", d.getDept()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepartments() { + ShowInterface show = showDao.findShowDetail("pipe"); + whiteboardDao.getDepartments(show); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepartmentNames() { + assertTrue(whiteboardDao.getDepartmentNames().size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testGetTasks() { + whiteboardDao.getTasks(showDao.findShowDetail("pipe"), departmentDao.getDefaultDepartment()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetTask() { + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + departmentManager.createTask(t); + + whiteboardDao.getTask(showDao.findShowDetail("pipe"), departmentDao.getDefaultDepartment(), + "dev.cue"); + } + + @Test + @Transactional + @Rollback(true) + public void getFrame() { + JobDetail job = launchJob(); + FrameInterface frame = frameDao.findFrame(job, "0001-pass_1_preprocess"); + assertEquals(1, whiteboardDao.getFrame(frame.getFrameId()).getNumber()); + } + + @Test + @Transactional + @Rollback(true) + public void getLayer() { + JobDetail job = launchJob(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + assertEquals(layer.getName(), whiteboardDao.getLayer(layer.getLayerId()).getName()); + } + + @Test + @Transactional + @Rollback(true) + public void getHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + assertEquals(hd.getName(), whiteboardDao.getHost(proc.getHostId()).getName()); + } + + @Test + @Transactional + @Rollback(true) + public void getProcs() { + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + assertEquals(1, whiteboardDao.getProcs(proc).getProcsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getProcsBySearch() { + 
RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + /* + * Book 5 procs. + */ + for (int i = 1; i < 6; i++) { + FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1", i)); + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = f.id; + proc.layerId = f.layerId; + proc.showId = f.showId; + procDao.insertVirtualProc(proc); + } + + ProcSearchInterface r; + + /* + * Search for all 5 running procs + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().addShows("pipe").build()); + assertEquals(5, whiteboardDao.getProcs(r).getProcsCount()); + + /* + * Limit the result to 1 result. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaA = r.getCriteria(); + r.setCriteria(criteriaA.toBuilder().addShows("pipe").addMaxResults(1).build()); + assertEquals(1, whiteboardDao.getProcs(r).getProcsCount()); + + /* + * Change the first result to 1, which should limit the result to 4. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaB = r.getCriteria(); + r.setCriteria(criteriaB.toBuilder().addShows("pipe").setFirstResult(2).build()); + assertEquals(4, whiteboardDao.getProcs(r).getProcsCount()); + + /* + * Now try to do the equivalent of a limit/offset + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaC = r.getCriteria(); + r.setCriteria( + criteriaC.toBuilder().addShows("pipe").setFirstResult(3).addMaxResults(2).build()); + assertEquals(2, whiteboardDao.getProcs(r).getProcsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getOwner() { + ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + whiteboardDao.getOwner("spongebob"); + } + + @Test + @Transactional + @Rollback(true) + public void getOwnersByShow() { + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + + ownerManager.takeOwnership(owner, hd); + + assertTrue(whiteboardDao.getOwners(showDao.findShowDetail("pipe")).size() != 0); + } + + @Test + @Transactional + @Rollback(true) + public void getDeedsByShow() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + + ownerManager.takeOwnership(owner, hd); + assertTrue(whiteboardDao.getDeeds(showDao.findShowDetail("pipe")).getDeedsCount() != 0); + + assertTrue(whiteboardDao.getDeeds(showDao.findShowDetail("pipe")).getDeedsCount() != 0); + } + + @Test + @Transactional + @Rollback(true) + public void getDeedsByOwner() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + + ownerManager.takeOwnership(owner, hd); + assertTrue(whiteboardDao.getDeeds(owner).getDeedsCount() != 0); + } + + @Test + @Transactional + @Rollback(true) + public void getHostsByOwner() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = 
ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + ownerManager.takeOwnership(owner, hd); - CommentDetail c = new CommentDetail(); - c.message = "you suck"; - c.subject = "a useful message"; - c.user = "testuser"; - c.timestamp = null; + assertEquals(1, whiteboardDao.getHosts(owner).getHostsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getOwnerFromDeed() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + DeedEntity deed = ownerManager.takeOwnership(owner, hd); + + Owner o2 = whiteboardDao.getOwner(deed); + + assertEquals(owner.getName(), o2.getName()); + assertEquals(1, o2.getHostCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getOwnerFromHost() { - commentManager.addComment(hd, c); - assertEquals(1,whiteboardDao.getComments(hd).getCommentsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilter() { - createFilter(); - whiteboardDao.findFilter(getShow(), "Default"); - } + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + ownerManager.takeOwnership(owner, hd); + + Owner o2 = whiteboardDao.getOwner(hd); + + assertEquals(owner.getName(), o2.getName()); + assertEquals(1, o2.getHostCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getRenderPartition() { - @Test - @Transactional - @Rollback(true) - public void testGetFilter() { - whiteboardDao.getFilter(createFilter()); - } + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); - @Test - @Transactional - @Rollback(true) - public void testGetMatchers() { - FilterEntity f = createFilter(); - createMatcher(f); - whiteboardDao.getMatchers(f); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatcher() { - FilterEntity f = createFilter(); - MatcherEntity m = createMatcher(f); - whiteboardDao.getMatcher(m); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActions() { - FilterEntity f = createFilter(); - createAction(f); - whiteboardDao.getActions(f); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAction() { - FilterEntity f = createFilter(); - whiteboardDao.getAction(createAction(f)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilters() { - createFilter(); - whiteboardDao.getFilters(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFramesByFrameSearch() { - JobEntity job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .setPage(1) - .setLimit(5) - .addLayers("pass_1") - .build()); - assertEquals(5, whiteboardDao.getFrames(r).getFramesCount()); - for (Frame f: whiteboardDao.getFrames(r).getFramesList()) { - assertEquals(f.getLayerName(), "pass_1"); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayers() { - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = 
host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0)); - dispatchSupport.updateUsageCounters(frame, 0); - whiteboardDao.getLayers(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimits() { - createTestLimits(); - List limits = whiteboardDao.getLimits(); - assertEquals(limits.size(), 2); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerLimits() { - createTestLimits(); - JobDetail job = launchLimitJob(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - List limits = whiteboardDao.getLimits(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).getName(), "arnold"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimit() { - String limitName = "testing"; - int limitMaxValue = 20; - String limitId = limitDao.createLimit(limitName, limitMaxValue); - Limit limit = whiteboardDao.getLimit(limitId); - assertEquals(limit.getName(), limitName); - assertEquals(limit.getMaxValue(), limitMaxValue); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLimit() { - String limitName = "testing"; - int limitMaxValue = 20; - String limitId = limitDao.createLimit(limitName, limitMaxValue); - Limit limit = whiteboardDao.findLimit(limitName); - assertEquals(limit.getName(), limitName); - assertEquals(limit.getMaxValue(), limitMaxValue); - assertEquals(limit.getId(), limitId); - } + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); + bookingManager.createLocalHostAssignment(hd, job, lba); - @Test - @Transactional - @Rollback(true) - public void testStopFrameUpdatesLayerMaxRSS() { - long max_rss = 123456L; - - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - // Note use of 4-arg stopFrame here to update max rss. 
- assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); - dispatchSupport.updateUsageCounters(frame, 0); - Layer layer = whiteboardDao.getLayer(frame.layerId); - assertEquals(max_rss, layer.getLayerStats().getMaxRss()); - } - - @Test - @Transactional - @Rollback(true) - public void testStopFrameUpdatesJobMaxRSS() { - long max_rss = 123456L; - - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - // Note use of 4-arg stopFrame here to update max rss. - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); - dispatchSupport.updateUsageCounters(frame, 0); - Job grpc_job = whiteboardDao.getJob(job.id); - assertEquals(max_rss, grpc_job.getJobStats().getMaxRss()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobs() { - launchJob(); - JobSearchCriteria r = JobSearchInterface.criteriaFactory(); - r = r.toBuilder().addShows("pipe").build(); - whiteboardDao.getJobs(jobSearchFactory.create(r)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobNames() { - launchJob(); - JobSearchCriteria r = JobSearchInterface.criteriaFactory(); - r = r.toBuilder().addShows("pipe").build(); - whiteboardDao.getJobNames(jobSearchFactory.create(r)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetUpdatedFrames() { - final JobDetail job = launchJob(); - List jobs = new ArrayList(); - - jobs.add(new JobInterface() { - public String getJobId() { return job.getId(); } - public String getShowId() { return null; } - public String getId() { return job.getId(); } - public String getName() { return null; } - public String getFacilityId() { throw new RuntimeException("not implemented"); } - }); - - whiteboardDao.getUpdatedFrames(job, new ArrayList(), - (int) (System.currentTimeMillis() / 1000)); - - } - - @Test(expected=IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testGetUpdatedFramesFailure() { - final JobDetail job = launchJob(); - List jobs = new ArrayList(); - - jobs.add(new JobInterface() { - public String getJobId() { return job.getId(); } - public String getShowId() { return null; } - public String getId() { return job.getId(); } - public String getName() { return null; } - public String getFacilityId() { throw new RuntimeException("not implemented"); } - }); - - // this one should fail - whiteboardDao.getUpdatedFrames(job, new ArrayList(), - (int) (System.currentTimeMillis() / 1000 - 1000000)); - } - - @Test - @Transactional - @Rollback(true) - public void testFindJob() { - JobDetail job = launchJob(); - whiteboardDao.findJob(job.name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJob() { - JobDetail job = launchJob(); - whiteboardDao.getJob(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void 
testGetSubscriptionByID() { - whiteboardDao.getSubscription("00000000-0000-0000-0000-000000000001"); - } - - @Test - @Transactional - @Rollback(true) - public void findFindSubscription() { - whiteboardDao.findSubscription("pipe", "spi.general"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptions() { - whiteboardDao.getSubscriptions(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionsByAllocation() { - whiteboardDao.getSubscriptions( - allocationDao.findAllocationEntity("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShow() { - whiteboardDao.getShow(getShow().id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindShow() { - whiteboardDao.findShow(getShow().name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShows() { - whiteboardDao.getShows(); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActiveShows() { - whiteboardDao.getActiveShows(); - } - - @Test - @Transactional - @Rollback(true) - public void testFindHost() { - - try { - HostEntity h = hostManager.findHostDetail(HOST); - hostManager.deleteHost(h); - } catch (Exception e) { } - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); - Host h = whiteboardDao.findHost(host.getName()); - assertEquals(host.getName(), h.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHosts() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - HostSearchCriteria h = HostSearchInterface.criteriaFactory(); - h = h.toBuilder().addHosts(HOST).build(); - assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHostsByAllocation() { - RenderHost host = getRenderHost(); - AllocationEntity alloc = allocationDao.getAllocationEntity("00000000-0000-0000-0000-000000000006"); - DispatchHost hd = hostManager.createHost(host, alloc); - - HostSearchCriteria h = HostSearchInterface.criteriaFactory(); - h = h.toBuilder().addAllocs(alloc.getName()).build(); - assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocation() { - whiteboardDao.getAllocation("00000000-0000-0000-0000-000000000000"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation() { - whiteboardDao.findAllocation("spi.general"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocations() { - whiteboardDao.getAllocations(); - } - - @Test - @Transactional - @Rollback(true) - public void testGetRootGroup() { - whiteboardDao.getRootGroup(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroup() { - whiteboardDao.getGroup("A0000000-0000-0000-0000-000000000000"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroups() { - whiteboardDao.getGroups(getShow()); - whiteboardDao.getGroup(groupDao.getRootGroupId(getShow())); - whiteboardDao.getGroups(groupDao.getRootGroupDetail(getShow())); - } - - @Test - @Transactional - @Rollback(true) - public void testFindGroup() { - whiteboardDao.findGroup("pipe", "pipe"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobDetail job = launchJob(); - whiteboardDao.findFrame(job.name, 
"pass_1", 1); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilterByName() { - createFilter(); - whiteboardDao.findFilter("pipe", "Default"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayer() { - JobDetail job = launchJob(); - whiteboardDao.findLayer(job.name, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartment() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - - Department d = whiteboardDao.getDepartment(show, dept.getName()); - - assertEquals("pipe.Unknown", d.getName()); - assertEquals("Unknown", d.getDept()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartments() { - ShowInterface show = showDao.findShowDetail("pipe"); - whiteboardDao.getDepartments(show); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartmentNames() { - assertTrue(whiteboardDao.getDepartmentNames().size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testGetTasks() { - whiteboardDao.getTasks(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetTask() { - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p,"dev.cue"); - departmentManager.createTask(t); - - whiteboardDao.getTask(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment(), "dev.cue"); - } - - @Test - @Transactional - @Rollback(true) - public void getFrame() { - JobDetail job = launchJob(); - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1_preprocess"); - assertEquals(1, whiteboardDao.getFrame(frame.getFrameId()).getNumber()); - } - - @Test - @Transactional - @Rollback(true) - public void getLayer() { - JobDetail job = launchJob(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - assertEquals(layer.getName(),whiteboardDao.getLayer(layer.getLayerId()).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void getHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - assertEquals(hd.getName(), whiteboardDao.getHost(proc.getHostId()).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void getProcs() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - assertEquals(1,whiteboardDao.getProcs(proc).getProcsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getProcsBySearch() { - RenderHost host = getRenderHost(); - DispatchHost hd = 
hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - /* - * Book 5 procs. - */ - for (int i=1; i<6; i++) { - FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1",i)); - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = f.id; - proc.layerId = f.layerId; - proc.showId = f.showId; - procDao.insertVirtualProc(proc); - } - - ProcSearchInterface r; - - /* - * Search for all 5 running procs - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().addShows("pipe").build()); - assertEquals(5, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Limit the result to 1 result. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaA = r.getCriteria(); - r.setCriteria(criteriaA.toBuilder() - .addShows("pipe") - .addMaxResults(1) - .build()); - assertEquals(1, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Change the first result to 1, which should limit - * the result to 4. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaB = r.getCriteria(); - r.setCriteria(criteriaB.toBuilder() - .addShows("pipe") - .setFirstResult(2) - .build()); - assertEquals(4, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Now try to do the equivalent of a limit/offset - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaC = r.getCriteria(); - r.setCriteria(criteriaC.toBuilder() - .addShows("pipe") - .setFirstResult(3) - .addMaxResults(2) - .build()); - assertEquals(2, whiteboardDao.getProcs(r).getProcsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwner() { - ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - whiteboardDao.getOwner("spongebob"); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnersByShow() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - - assertTrue(whiteboardDao.getOwners( - showDao.findShowDetail("pipe")).size() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getDeedsByShow() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - assertTrue(whiteboardDao.getDeeds( - showDao.findShowDetail("pipe")).getDeedsCount() != 0); - - assertTrue(whiteboardDao.getDeeds( - showDao.findShowDetail("pipe")).getDeedsCount() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getDeedsByOwner() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - assertTrue(whiteboardDao.getDeeds( - owner).getDeedsCount() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getHostsByOwner() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = 
ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - ownerManager.takeOwnership(owner, hd); - - assertEquals(1, whiteboardDao.getHosts(owner).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnerFromDeed() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - DeedEntity deed = ownerManager.takeOwnership(owner, hd); - - Owner o2 = whiteboardDao.getOwner(deed); - - assertEquals(owner.getName(), o2.getName()); - assertEquals(1, o2.getHostCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnerFromHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - ownerManager.takeOwnership(owner, hd); - - Owner o2 = whiteboardDao.getOwner(hd); - - assertEquals(owner.getName(), o2.getName()); - assertEquals(1, o2.getHostCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getRenderPartition() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); - bookingManager.createLocalHostAssignment(hd, job, lba); - - whiteboardDao.getRenderPartition(lba); - } - - @Test - @Transactional - @Rollback(true) - public void getRenderPartitionsByHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); - bookingManager.createLocalHostAssignment(hd, job, lba); - - assertEquals(1, whiteboardDao.getRenderPartitions(hd).getRenderPartitionsCount()); - - } - - @Test - @Transactional - @Rollback(true) - public void getFacility() { - whiteboardDao.getFacilities(); - whiteboardDao.getFacility("spi"); - } - - @Test - @Transactional - @Rollback(true) - public void getFrameWithNoDisplayOverride() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); - assertEquals(false, retrievedFrame.hasFrameStateDisplayOverride()); - } - - public FrameStateDisplayOverride createFrameStateDisplayOverride(String frameId) { - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(FrameState.SUCCEEDED) - .setText("FINISHED") - .setColor(FrameStateDisplayOverride.RGB.newBuilder() - .setRed(114) - .setGreen(42) - .setBlue(200) - .build()) - .build(); - frameDao.setFrameStateDisplayOverride(frameId, override); - - return override; - } - - @Test - @Transactional - @Rollback(true) - public void testFramesWithDisplayOverride() { - // since current_timestamp does not update, we need to make sure the - // timestamp we use when retrieving updated frames is older than when - // the frame's ts_updated value is set to during insertion. 
- long timestamp = System.currentTimeMillis(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - // Create override - FrameStateDisplayOverride override = createFrameStateDisplayOverride(frame.getFrameId()); - FrameStateDisplayOverrideSeq results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - - // Test GET_FRAME - Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); - assertTrue(retrievedFrame.hasFrameStateDisplayOverride()); - assertEquals(override, retrievedFrame.getFrameStateDisplayOverride()); - - // Test GET_UPDATED_FRAME - UpdatedFrameCheckResult rs = whiteboardDao.getUpdatedFrames(job, - new ArrayList(), (int) (timestamp / 1000)); - UpdatedFrameSeq uFrames = rs.getUpdatedFrames(); - // We'll end up getting all the frames for the job so we need to find - // the one we want. - for (UpdatedFrame uFrame: uFrames.getUpdatedFramesList()) { - if (uFrame.getId().equals(frame.getFrameId())) { - assertTrue(uFrame.hasFrameStateDisplayOverride()); - assertEquals(override, uFrame.getFrameStateDisplayOverride()); - break; - } - } - - // Test GET_FRAMES_CRITERIA - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .setPage(1) - .setLimit(5) - .addLayers("pass_1_preprocess") - .build()); - FrameSeq frames = whiteboardDao.getFrames(r); - Frame fcFrame = frames.getFrames(0); - assertTrue(fcFrame.hasFrameStateDisplayOverride()); - assertEquals(override, fcFrame.getFrameStateDisplayOverride()); - } + whiteboardDao.getRenderPartition(lba); + } + + @Test + @Transactional + @Rollback(true) + public void getRenderPartitionsByHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); + bookingManager.createLocalHostAssignment(hd, job, lba); + + assertEquals(1, whiteboardDao.getRenderPartitions(hd).getRenderPartitionsCount()); + + } + + @Test + @Transactional + @Rollback(true) + public void getFacility() { + whiteboardDao.getFacilities(); + whiteboardDao.getFacility("spi"); + } + + @Test + @Transactional + @Rollback(true) + public void getFrameWithNoDisplayOverride() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); + assertEquals(false, retrievedFrame.hasFrameStateDisplayOverride()); + } + + public FrameStateDisplayOverride createFrameStateDisplayOverride(String frameId) { + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() + .setState(FrameState.SUCCEEDED).setText("FINISHED").setColor(FrameStateDisplayOverride.RGB + .newBuilder().setRed(114).setGreen(42).setBlue(200).build()) + .build(); + frameDao.setFrameStateDisplayOverride(frameId, override); + + return override; + } + + @Test + @Transactional + @Rollback(true) + public void testFramesWithDisplayOverride() { + // since current_timestamp does not update, we need to make sure the + // timestamp we use when retrieving updated frames is older than when + // the frame's ts_updated value is set to during 
insertion. + long timestamp = System.currentTimeMillis(); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + // Create override + FrameStateDisplayOverride override = createFrameStateDisplayOverride(frame.getFrameId()); + FrameStateDisplayOverrideSeq results = + frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + + // Test GET_FRAME + Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); + assertTrue(retrievedFrame.hasFrameStateDisplayOverride()); + assertEquals(override, retrievedFrame.getFrameStateDisplayOverride()); + + // Test GET_UPDATED_FRAME + UpdatedFrameCheckResult rs = whiteboardDao.getUpdatedFrames(job, + new ArrayList(), (int) (timestamp / 1000)); + UpdatedFrameSeq uFrames = rs.getUpdatedFrames(); + // We'll end up getting all the frames for the job so we need to find + // the one we want. + for (UpdatedFrame uFrame : uFrames.getUpdatedFramesList()) { + if (uFrame.getId().equals(frame.getFrameId())) { + assertTrue(uFrame.hasFrameStateDisplayOverride()); + assertEquals(override, uFrame.getFrameStateDisplayOverride()); + break; + } + } + + // Test GET_FRAMES_CRITERIA + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria( + criteria.toBuilder().setPage(1).setLimit(5).addLayers("pass_1_preprocess").build()); + FrameSeq frames = whiteboardDao.getFrames(r); + Frame fcFrame = frames.getFrames(0); + assertTrue(fcFrame.hasFrameStateDisplayOverride()); + assertEquals(override, fcFrame.getFrameStateDisplayOverride()); + } } - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java index 55bd44463..b27e04f08 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -52,159 +48,141 @@ @ContextConfiguration public class CoreUnitDispatcherGpuJobTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = - "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v1"; - - private static final String TARGET_JOB = - "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpu_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem((int) CueUtil.GB8) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB8) - .setTotalSwap((int) CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(200) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuRemovedHostToNonGpuJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); - host.idleCores = host.idleCores - Math.min(100, host.idleCores); - host.idleGpuMemory = 0; - List procs = dispatcher.dispatchHost(host, job); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToGroup() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - GroupDetail group = groupManager.getGroupDetail(job); - - List procs = dispatcher.dispatchHost(host, group); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToShowNoPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchRemovedGpuHostToShowPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host, show); - assertEquals(0, 
procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void dispatchProcToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - List procs = dispatcher.dispatchHost(host, job); - VirtualProc proc = procs.get(0); - dispatcher.dispatchProcToJob(proc, job); - } + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpu_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8).setTotalSwap((int) CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(200).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuRemovedHostToNonGpuJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); + host.idleCores = host.idleCores - Math.min(100, host.idleCores); + host.idleGpuMemory = 0; + List procs = dispatcher.dispatchHost(host, job); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToGroup() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + GroupDetail group = groupManager.getGroupDetail(job); + + List procs = dispatcher.dispatchHost(host, group); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToShowNoPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchRemovedGpuHostToShowPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = 
dispatcher.dispatchHost(host, show); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void dispatchProcToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + List procs = dispatcher.dispatchHost(host, job); + VirtualProc proc = procs.get(0); + dispatcher.dispatchProcToJob(proc, job); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java index c61c9553f..861bbd165 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -52,184 +48,166 @@ @ContextConfiguration public class CoreUnitDispatcherGpuTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = - "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = - "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem((int) CueUtil.GB8) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB8) - .setTotalSwap((int) CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(200) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuRemovedHostToNonGpuJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); - host.idleCores = host.idleCores - Math.min(100, host.idleCores); - host.idleGpuMemory = 0; - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToGroup() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - GroupDetail group = groupManager.getGroupDetail(job); - - List procs = dispatcher.dispatchHost(host, group); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToShowNoPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchRemovedGpuHostToShowPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host, show); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostRemoveRestoreGpu() { - DispatchHost host = getHost(); - - long idleMemoryOrig = host.idleMemory; - int idleCoresOrig = host.idleCores; - long idleGpuMemoryOrig = host.idleGpuMemory; - int idleGpusOrig = host.idleGpus; - - host.removeGpu(); - assertEquals(0, host.idleGpuMemory); - assertEquals(0, host.idleGpus); - assertEquals(idleMemoryOrig - CueUtil.GB4, host.idleMemory); - assertEquals(idleCoresOrig - 100, host.idleCores); - - host.restoreGpu(); - assertEquals(idleMemoryOrig, host.idleMemory); - assertEquals(idleCoresOrig, host.idleCores); - assertEquals(idleGpuMemoryOrig, host.idleGpuMemory); - assertEquals(idleGpusOrig, host.idleGpus); - } - - @Test - @Transactional - @Rollback(true) - public void dispatchProcToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - host.idleGpuMemory = 0; - List procs = dispatcher.dispatchHost(host, job); - VirtualProc proc = procs.get(0); - dispatcher.dispatchProcToJob(proc, job); - } + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + 
GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8).setTotalSwap((int) CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(200).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuRemovedHostToNonGpuJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); + host.idleCores = host.idleCores - Math.min(100, host.idleCores); + host.idleGpuMemory = 0; + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToGroup() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + GroupDetail group = groupManager.getGroupDetail(job); + + List procs = dispatcher.dispatchHost(host, group); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToShowNoPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchRemovedGpuHostToShowPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host, show); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostRemoveRestoreGpu() { + DispatchHost host = getHost(); + + long idleMemoryOrig = host.idleMemory; + int idleCoresOrig = host.idleCores; + long idleGpuMemoryOrig = host.idleGpuMemory; + int idleGpusOrig = host.idleGpus; + + host.removeGpu(); + assertEquals(0, host.idleGpuMemory); + assertEquals(0, host.idleGpus); + assertEquals(idleMemoryOrig - 
CueUtil.GB4, host.idleMemory); + assertEquals(idleCoresOrig - 100, host.idleCores); + + host.restoreGpu(); + assertEquals(idleMemoryOrig, host.idleMemory); + assertEquals(idleCoresOrig, host.idleCores); + assertEquals(idleGpuMemoryOrig, host.idleGpuMemory); + assertEquals(idleGpusOrig, host.idleGpus); + } + + @Test + @Transactional + @Rollback(true) + public void dispatchProcToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + host.idleGpuMemory = 0; + List procs = dispatcher.dispatchHost(host, job); + VirtualProc proc = procs.get(0); + dispatcher.dispatchProcToJob(proc, job); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java index e2d1cb564..b1a86da99 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -57,222 +53,197 @@ @ContextConfiguration public class CoreUnitDispatcherGpusJobTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - LayerDao layerDao; - - @Resource - FrameDao frameDao; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - DependManager dependManager; - - private static final String HOSTNAME = "beta"; - - private static final String CPU_JOB = "pipe-default-testuser_test_cpu"; - - private static final String GPU_JOB = "pipe-default-testuser_test_gpu"; - - private static final String GPU_OVERBOOK_JOB = "pipe-default-testuser_test_gpu_overbook"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch( - new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem((int) CueUtil.GB8) - .setFreeSwap(20760) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(CueUtil.GB8) - .setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(40) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setNumGpus(8) - .setFreeGpuMem(CueUtil.GB32) - .setTotalGpuMem(CueUtil.GB32) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - // All jobs are paused. procs should be empty. - assertTrue(procs.isEmpty()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchCpuJob() { - JobDetail job = jobManager.findJobDetail(CPU_JOB); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - // Cuebot doesn't dispatch non-GPU job to GPU host. procs should be empty. - assertTrue(procs.isEmpty()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuJob() { - JobDetail job = jobManager.findJobDetail(GPU_JOB); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - - /* - * The job contains 4 layers. - * - test_gpus_0_layer gpus=0 gpu_memory=1 - * - test_gpu_memory_0_layer gpus=1 gpu_memory=0 - * - test_gpus_1_layer gpus=1 gpu_memory=1 - * - test_gpus_4_kayer gpus=4 gpu_memory=7g - * - * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. - * Also job_frame_dispatch_max is 2, - * the procs should be test_gpus_0_layer and test_gpus_1_layer. 
- */ - assertEquals(2, procs.size()); - - VirtualProc proc0 = procs.get(0); - LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); - assertEquals(layer0.id, proc0.layerId); - assertEquals(100, proc0.coresReserved); - assertEquals(3355443, proc0.memoryReserved); - assertEquals(0, proc0.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - - VirtualProc proc1 = procs.get(1); - LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); - assertEquals(layer1.id, proc1.layerId); - assertEquals(100, proc1.coresReserved); - assertEquals(3355443, proc1.memoryReserved); - assertEquals(1, proc1.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuJobWithDependency() { - JobDetail job = jobManager.findJobDetail(GPU_JOB); - LayerDetail dl0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); - LayerDetail dl1 = layerDao.findLayerDetail(job, "test_gpu_memory_0_layer"); - LayerOnLayer depend = new LayerOnLayer(dl0, dl1); - dependManager.createDepend(depend); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - - /* - * The job contains 4 layers. - * - test_gpus_0_layer gpus=0 gpu_memory=1 - * - test_gpu_memory_0_layer gpus=1 gpu_memory=0 - * - test_gpus_1_layer gpus=1 gpu_memory=1 - * - test_gpus_4_kayer gpus=4 gpu_memory=7g - * - * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. - * And test_gpus_0_layer depends on test_gpu_memory_0_layer. - * So the procs should be test_gpus_1_layer and test_gpus_4_layer. - */ - assertEquals(2, procs.size()); - - VirtualProc proc0 = procs.get(0); - LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); - assertEquals(layer0.id, proc0.layerId); - assertEquals(100, proc0.coresReserved); - assertEquals(3355443, proc0.memoryReserved); - assertEquals(1, proc0.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - - VirtualProc proc1 = procs.get(1); - LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_4_layer"); - assertEquals(layer1.id, proc1.layerId); - assertEquals(100, proc1.coresReserved); - assertEquals(3355443, proc1.memoryReserved); - assertEquals(4, proc1.gpusReserved); - assertEquals(7340032, proc1.gpuMemoryReserved); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuOverbookJob() { - JobDetail job = jobManager.findJobDetail(GPU_OVERBOOK_JOB); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - - /* - * The job contains 2 layers. - * - test_gpus_6_layer gpus=6 gpu_memory=1 - * - test_gpus_3_layer gpus=3 gpu_memory=1 - * the procs should be only test_gpus_6_layer since host only has 8 GPUs. 
- */ - assertEquals(1, procs.size()); - - VirtualProc proc0 = procs.get(0); - LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_6_layer"); - assertEquals(layer0.id, proc0.layerId); - assertEquals(100, proc0.coresReserved); - assertEquals(3355443, proc0.memoryReserved); - assertEquals(6, proc0.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - } -} + @Resource + JobManager jobManager; + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + LayerDao layerDao; + + @Resource + FrameDao frameDao; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + DependManager dependManager; + + private static final String HOSTNAME = "beta"; + + private static final String CPU_JOB = "pipe-default-testuser_test_cpu"; + + private static final String GPU_JOB = "pipe-default-testuser_test_gpu"; + + private static final String GPU_OVERBOOK_JOB = "pipe-default-testuser_test_gpu_overbook"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(8) + .setFreeGpuMem(CueUtil.GB32).setTotalGpuMem(CueUtil.GB32).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + // All jobs are paused. procs should be empty. + assertTrue(procs.isEmpty()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchCpuJob() { + JobDetail job = jobManager.findJobDetail(CPU_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + // Cuebot doesn't dispatch non-GPU job to GPU host. procs should be empty. + assertTrue(procs.isEmpty()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuJob() { + JobDetail job = jobManager.findJobDetail(GPU_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 4 layers. - test_gpus_0_layer gpus=0 gpu_memory=1 - test_gpu_memory_0_layer + * gpus=1 gpu_memory=0 - test_gpus_1_layer gpus=1 gpu_memory=1 - test_gpus_4_kayer gpus=4 + * gpu_memory=7g + * + * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. Also + * job_frame_dispatch_max is 2, the procs should be test_gpus_0_layer and test_gpus_1_layer. 
+ */ + assertEquals(2, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(0, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + + VirtualProc proc1 = procs.get(1); + LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); + assertEquals(layer1.id, proc1.layerId); + assertEquals(100, proc1.coresReserved); + assertEquals(3355443, proc1.memoryReserved); + assertEquals(1, proc1.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuJobWithDependency() { + JobDetail job = jobManager.findJobDetail(GPU_JOB); + LayerDetail dl0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); + LayerDetail dl1 = layerDao.findLayerDetail(job, "test_gpu_memory_0_layer"); + LayerOnLayer depend = new LayerOnLayer(dl0, dl1); + dependManager.createDepend(depend); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 4 layers. - test_gpus_0_layer gpus=0 gpu_memory=1 - test_gpu_memory_0_layer + * gpus=1 gpu_memory=0 - test_gpus_1_layer gpus=1 gpu_memory=1 - test_gpus_4_kayer gpus=4 + * gpu_memory=7g + * + * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. And + * test_gpus_0_layer depends on test_gpu_memory_0_layer. So the procs should be + * test_gpus_1_layer and test_gpus_4_layer. + */ + assertEquals(2, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(1, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + + VirtualProc proc1 = procs.get(1); + LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_4_layer"); + assertEquals(layer1.id, proc1.layerId); + assertEquals(100, proc1.coresReserved); + assertEquals(3355443, proc1.memoryReserved); + assertEquals(4, proc1.gpusReserved); + assertEquals(7340032, proc1.gpuMemoryReserved); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuOverbookJob() { + JobDetail job = jobManager.findJobDetail(GPU_OVERBOOK_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 2 layers. - test_gpus_6_layer gpus=6 gpu_memory=1 - test_gpus_3_layer gpus=3 + * gpu_memory=1 the procs should be only test_gpus_6_layer since host only has 8 GPUs. 
+ */ + assertEquals(1, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_6_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(6, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java index 89112dd69..9b9ddda3e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -52,167 +48,150 @@ @ContextConfiguration public class CoreUnitDispatcherTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = - "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = - "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testdispatchHostToAllShows() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHostToAllShows(host); - // The first show is removed. findDispatchJobs: shows.remove(0). - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - List procs = dispatcher.dispatchHost(host, job); - - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToGroup() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - GroupDetail group = groupManager.getGroupDetail(job); - - List procs = dispatcher.dispatchHost(host, group); - assertEquals(1, procs.size()); - - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToShowNoPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToShowPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host, show); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchProcToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - List procs = dispatcher.dispatchHost(host, job); - VirtualProc proc = procs.get(0); - dispatcher.dispatchProcToJob(proc, job); - } -} + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of 
free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(1).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux").build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testdispatchHostToAllShows() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHostToAllShows(host); + // The first show is removed. findDispatchJobs: shows.remove(0). + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + List procs = dispatcher.dispatchHost(host, job); + + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToGroup() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + GroupDetail group = groupManager.getGroupDetail(job); + + List procs = dispatcher.dispatchHost(host, group); + assertEquals(1, procs.size()); + + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToShowNoPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToShowPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host, show); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchProcToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + List procs = dispatcher.dispatchHost(host, job); + VirtualProc proc = procs.get(0); + dispatcher.dispatchProcToJob(proc, job); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java index 55a7806c0..34d9eaabc 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -48,120 +44,102 @@ @ContextConfiguration public class DispatchSupportTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = - "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = - "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(400) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi","general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDetermineIdleCores() { - DispatchHost host = getHost(); - - int grace_load = Dispatcher.CORE_LOAD_THRESHOLD * (host.cores / 100); - - // Machine is idle, no load. - dispatchSupport.determineIdleCores(host, 0); - assertEquals(800, host.idleCores); - - // Machine is idle but shows load of 200. - host.idleCores = 800; - dispatchSupport.determineIdleCores(host, 200); - assertEquals(grace_load + 600, host.idleCores); - - //Machine is idle but has the grace load. 
- host.idleCores = 800; - dispatchSupport.determineIdleCores(host, grace_load); - assertEquals(800, host.idleCores); - - //Machine has 100 units idle, grace_load -1 - host.idleCores = 100; - dispatchSupport.determineIdleCores(host, 700 + grace_load - 1); - assertEquals(100, host.idleCores); - - // Machine has 100 units idle, grace_load + 1 - host.idleCores = 100; - dispatchSupport.determineIdleCores(host, 700 + grace_load + 1); - assertEquals(99, host.idleCores); - } -} + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(2).setCoresPerProc(400).addTags("test").setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDetermineIdleCores() { + DispatchHost host = getHost(); + + int grace_load = Dispatcher.CORE_LOAD_THRESHOLD * (host.cores / 100); + + // Machine is idle, no load. + dispatchSupport.determineIdleCores(host, 0); + assertEquals(800, host.idleCores); + + // Machine is idle but shows load of 200. + host.idleCores = 800; + dispatchSupport.determineIdleCores(host, 200); + assertEquals(grace_load + 600, host.idleCores); + + // Machine is idle but has the grace load. 
+ host.idleCores = 800; + dispatchSupport.determineIdleCores(host, grace_load); + assertEquals(800, host.idleCores); + + // Machine has 100 units idle, grace_load -1 + host.idleCores = 100; + dispatchSupport.determineIdleCores(host, 700 + grace_load - 1); + assertEquals(100, host.idleCores); + + // Machine has 100 units idle, grace_load + 1 + host.idleCores = 100; + dispatchSupport.determineIdleCores(host, 700 + grace_load + 1); + assertEquals(99, host.idleCores); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java index 7d0901562..04dee5927 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -64,425 +60,362 @@ @ContextConfiguration public class FrameCompleteHandlerTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - FrameCompleteHandler frameCompleteHandler; - - @Resource - HostManager hostManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; + @Resource + AdminManager adminManager; + + @Resource + FrameCompleteHandler frameCompleteHandler; - @Resource - ServiceManager serviceManager; + @Resource + HostManager hostManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + FrameDao frameDao; - private static final String HOSTNAME = "beta"; - private static final String HOSTNAME2 = "zeta"; - - @Before - public void setTestMode() { - - dispatcher.setTestMode(true); + @Resource + LayerDao layerDao; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + ServiceManager serviceManager; + + private static final String HOSTNAME = "beta"; + private static final String HOSTNAME2 = "zeta"; + + @Before + public void setTestMode() { + + dispatcher.setTestMode(true); + } + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(8) + .setFreeGpuMem(CueUtil.GB16 * 8).setTotalGpuMem(CueUtil.GB16 * 8).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + + RenderHost host2 = RenderHost.newBuilder().setName(HOSTNAME2).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB4).setFreeSwap((int) CueUtil.GB4) + .setLoad(0).setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8) + .setTotalSwap((int) CueUtil.GB8).setNimbyEnabled(false).setNumProcs(8).setCoresPerProc(100) + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux").build(); + + hostManager.createHost(host2, adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost(String hostname) { + return hostManager.findDispatchHost(hostname); + } + + @Test + @Transactional + @Rollback(true) + public void testGpuReport() { + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + assertEquals(7, host.idleGpus); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); + + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); + + assertTrue(jobManager.isLayerComplete(layer)); + assertTrue(jobManager.isJobComplete(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testGpuReportMultiple() { + JobDetail job0 = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer0_0 = layerDao.findLayerDetail(job0, "layer0"); + jobManager.setJobPaused(job0, false); + + JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); + LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); + jobManager.setJobPaused(job1, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(2, procs.size()); + + assertEquals(4, host.idleGpus); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB2, host.idleGpuMemory); + + for (VirtualProc proc : procs) { + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); } - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch( - new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + assertTrue(jobManager.isLayerComplete(layer0_0)); + assertTrue(jobManager.isJobComplete(job0)); + assertTrue(jobManager.isLayerComplete(layer1_0)); + assertTrue(jobManager.isJobComplete(job1)); + } + + @Test + @Transactional + @Rollback(true) + public void testGpuReportOver() { + JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); + LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); + jobManager.setJobPaused(job1, false); + + JobDetail job2 = jobManager.findJobDetail("pipe-default-testuser_test2"); + LayerDetail layer2_0 = layerDao.findLayerDetail(job2, "layer0"); + jobManager.setJobPaused(job2, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + + assertTrue(host.idleGpus == 5 || 
host.idleGpus == 2); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); + + for (VirtualProc proc : procs) { + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); } - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem((int) CueUtil.GB8) - .setFreeSwap(20760) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(CueUtil.GB8) - .setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(40) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setNumGpus(8) - .setFreeGpuMem(CueUtil.GB16 * 8) - .setTotalGpuMem(CueUtil.GB16 * 8) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - - RenderHost host2 = RenderHost.newBuilder() - .setName(HOSTNAME2) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem((int) CueUtil.GB4) - .setFreeSwap((int) CueUtil.GB4) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB8) - .setTotalSwap((int) CueUtil.GB8) - .setNimbyEnabled(false) - .setNumProcs(8) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host2, - adminManager.findAllocationDetail("spi", "general")); + assertEquals(1, (jobManager.isLayerComplete(layer1_0) ? 1 : 0) + + (jobManager.isLayerComplete(layer2_0) ? 1 : 0)); + assertEquals(1, + (jobManager.isJobComplete(job1) ? 1 : 0) + (jobManager.isJobComplete(job2) ? 
1 : 0)); + } + + private void executeDepend(FrameState frameState, int exitStatus, int dependCount, + FrameState dependState) { + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test_depend"); + LayerDetail layerFirst = layerDao.findLayerDetail(job, "layer_first"); + LayerDetail layerSecond = layerDao.findLayerDetail(job, "layer_second"); + FrameDetail frameFirst = frameDao.findFrameDetail(job, "0000-layer_first"); + FrameDetail frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); + + assertEquals(1, frameSecond.dependCount); + assertEquals(FrameState.DEPEND, frameSecond.state); + + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layerFirst.getId(), proc.getLayerId()); + assertEquals(frameFirst.getId(), proc.getFrameId()); + + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(exitStatus).build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, frameState, report.getExitStatus(), + report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, dispatchFrame, + frameState, frameDetail); + + assertTrue(jobManager.isLayerComplete(layerFirst)); + assertFalse(jobManager.isLayerComplete(layerSecond)); + + frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); + assertEquals(dependCount, frameSecond.dependCount); + assertEquals(dependState, frameSecond.state); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnSuccess() { + assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnFailure() { + assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.EATEN, -1, 1, FrameState.DEPEND); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnSuccessSatifyOnAny() { + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); + assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnFailureSatisfyOnAny() { + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); + assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.EATEN, -1, 0, FrameState.WAITING); + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); + } + + private void executeMinMemIncrease(int expected, boolean override) { + if (override) { + ServiceOverrideEntity soe = new ServiceOverrideEntity(); + soe.showId = "00000000-0000-0000-0000-000000000000"; + soe.name = "apitest"; + soe.threadable = false; + soe.minCores = 10; + soe.minMemory = (int) CueUtil.GB2; + 
soe.tags = new LinkedHashSet<>(); + soe.tags.add("general"); + soe.minMemoryIncrease = (int) CueUtil.GB8; + + serviceManager.createService(soe); } - public DispatchHost getHost(String hostname) { - return hostManager.findDispatchHost(hostname); + String jobName = "pipe-default-testuser_min_mem_test"; + JobDetail job = jobManager.findJobDetail(jobName); + LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); + FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME2); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layer.getId(), proc.getLayerId()); + assertEquals(frame.getId(), proc.getFrameId()); + + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder().setFrame(info) + .setExitStatus(Dispatcher.EXIT_STATUS_MEMORY_FAILURE).build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), + report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, dispatchFrame, + FrameState.WAITING, frameDetail); + + assertFalse(jobManager.isLayerComplete(layer)); + + JobDetail ujob = jobManager.findJobDetail(jobName); + LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); + assertEquals(expected, ulayer.getMinimumMemory()); + } + + private void executeMinMemIncreaseDocker(int expected, boolean override) { + if (override) { + ServiceOverrideEntity soe = new ServiceOverrideEntity(); + soe.showId = "00000000-0000-0000-0000-000000000000"; + soe.name = "apitest"; + soe.threadable = false; + soe.minCores = 10; + soe.minMemory = (int) CueUtil.GB2; + soe.tags = new LinkedHashSet<>(); + soe.tags.add("general"); + soe.minMemoryIncrease = (int) CueUtil.GB8; + + serviceManager.createService(soe); } - @Test - @Transactional - @Rollback(true) - public void testGpuReport() { - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); - LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - assertEquals(7, host.idleGpus); - assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); - - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(0) - .build(); - frameCompleteHandler.handleFrameCompleteReport(report); - - assertTrue(jobManager.isLayerComplete(layer)); - assertTrue(jobManager.isJobComplete(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testGpuReportMultiple() { - JobDetail job0 = jobManager.findJobDetail("pipe-default-testuser_test0"); - LayerDetail layer0_0 = 
layerDao.findLayerDetail(job0, "layer0"); - jobManager.setJobPaused(job0, false); - - JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); - LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); - jobManager.setJobPaused(job1, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(2, procs.size()); - - assertEquals(4, host.idleGpus); - assertEquals(CueUtil.GB16 * 8 - CueUtil.GB2, host.idleGpuMemory); - - for (VirtualProc proc : procs) { - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(0) - .build(); - frameCompleteHandler.handleFrameCompleteReport(report); - } - - assertTrue(jobManager.isLayerComplete(layer0_0)); - assertTrue(jobManager.isJobComplete(job0)); - assertTrue(jobManager.isLayerComplete(layer1_0)); - assertTrue(jobManager.isJobComplete(job1)); - } - - @Test - @Transactional - @Rollback(true) - public void testGpuReportOver() { - JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); - LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); - jobManager.setJobPaused(job1, false); - - JobDetail job2 = jobManager.findJobDetail("pipe-default-testuser_test2"); - LayerDetail layer2_0 = layerDao.findLayerDetail(job2, "layer0"); - jobManager.setJobPaused(job2, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - - assertTrue(host.idleGpus == 5 || host.idleGpus == 2); - assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); - - for (VirtualProc proc : procs) { - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(0) - .build(); - frameCompleteHandler.handleFrameCompleteReport(report); - } - - assertEquals(1, - (jobManager.isLayerComplete(layer1_0) ? 1 : 0) + - (jobManager.isLayerComplete(layer2_0) ? 1 : 0)); - assertEquals(1, - (jobManager.isJobComplete(job1) ? 1 : 0) + - (jobManager.isJobComplete(job2) ? 
1 : 0)); - } - - private void executeDepend( - FrameState frameState, int exitStatus, int dependCount, FrameState dependState) { - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test_depend"); - LayerDetail layerFirst = layerDao.findLayerDetail(job, "layer_first"); - LayerDetail layerSecond = layerDao.findLayerDetail(job, "layer_second"); - FrameDetail frameFirst = frameDao.findFrameDetail(job, "0000-layer_first"); - FrameDetail frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); - - assertEquals(1, frameSecond.dependCount); - assertEquals(FrameState.DEPEND, frameSecond.state); - - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - assertEquals(job.getId(), proc.getJobId()); - assertEquals(layerFirst.getId(), proc.getLayerId()); - assertEquals(frameFirst.getId(), proc.getFrameId()); - - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(exitStatus) - .build(); - - DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - dispatchSupport.stopFrame(dispatchFrame, frameState, report.getExitStatus(), - report.getFrame().getMaxRss()); - frameCompleteHandler.handlePostFrameCompleteOperations(proc, - report, dispatchJob, dispatchFrame, frameState, frameDetail); - - assertTrue(jobManager.isLayerComplete(layerFirst)); - assertFalse(jobManager.isLayerComplete(layerSecond)); - - frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); - assertEquals(dependCount, frameSecond.dependCount); - assertEquals(dependState, frameSecond.state); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnSuccess() { - assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnFailure() { - assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.EATEN, -1, 1, FrameState.DEPEND); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnSuccessSatifyOnAny() { - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); - assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnFailureSatisfyOnAny() { - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); - assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.EATEN, -1, 0, FrameState.WAITING); - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); - } - - private void executeMinMemIncrease(int expected, boolean override) { - if (override) { - ServiceOverrideEntity soe = new ServiceOverrideEntity(); - soe.showId = "00000000-0000-0000-0000-000000000000"; - soe.name = "apitest"; - soe.threadable = false; - soe.minCores = 10; - soe.minMemory = (int) 
CueUtil.GB2; - soe.tags = new LinkedHashSet<>(); - soe.tags.add("general"); - soe.minMemoryIncrease = (int) CueUtil.GB8; - - serviceManager.createService(soe); - } - - String jobName = "pipe-default-testuser_min_mem_test"; - JobDetail job = jobManager.findJobDetail(jobName); - LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); - FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME2); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - assertEquals(job.getId(), proc.getJobId()); - assertEquals(layer.getId(), proc.getLayerId()); - assertEquals(frame.getId(), proc.getFrameId()); - - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(Dispatcher.EXIT_STATUS_MEMORY_FAILURE) - .build(); - - DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), - report.getFrame().getMaxRss()); - frameCompleteHandler.handlePostFrameCompleteOperations(proc, - report, dispatchJob, dispatchFrame, FrameState.WAITING, frameDetail); - - assertFalse(jobManager.isLayerComplete(layer)); - - JobDetail ujob = jobManager.findJobDetail(jobName); - LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); - assertEquals(expected, ulayer.getMinimumMemory()); - } - - - - private void executeMinMemIncreaseDocker(int expected, boolean override) { - if (override) { - ServiceOverrideEntity soe = new ServiceOverrideEntity(); - soe.showId = "00000000-0000-0000-0000-000000000000"; - soe.name = "apitest"; - soe.threadable = false; - soe.minCores = 10; - soe.minMemory = (int) CueUtil.GB2; - soe.tags = new LinkedHashSet<>(); - soe.tags.add("general"); - soe.minMemoryIncrease = (int) CueUtil.GB8; - - serviceManager.createService(soe); - } - - String jobName = "pipe-default-testuser_min_mem_test"; - JobDetail job = jobManager.findJobDetail(jobName); - LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); - FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME2); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - assertEquals(job.getId(), proc.getJobId()); - assertEquals(layer.getId(), proc.getLayerId()); - assertEquals(frame.getId(), proc.getFrameId()); - - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE) - .build(); - - DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - 
dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), - report.getFrame().getMaxRss()); - frameCompleteHandler.handlePostFrameCompleteOperations(proc, - report, dispatchJob, dispatchFrame, FrameState.WAITING, frameDetail); - - assertFalse(jobManager.isLayerComplete(layer)); - - JobDetail ujob = jobManager.findJobDetail(jobName); - LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); - assertEquals(expected, ulayer.getMinimumMemory()); - } - - @Test - @Transactional - @Rollback(true) - public void testMinMemIncrease() { - executeMinMemIncrease(6291456, false); - } - - @Test - @Transactional - @Rollback(true) - public void testMinMemIncreaseShowOverride() { - executeMinMemIncrease(10485760, true); - } + String jobName = "pipe-default-testuser_min_mem_test"; + JobDetail job = jobManager.findJobDetail(jobName); + LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); + FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME2); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layer.getId(), proc.getLayerId()); + assertEquals(frame.getId(), proc.getFrameId()); + + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder().setFrame(info) + .setExitStatus(Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE).build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), + report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, dispatchFrame, + FrameState.WAITING, frameDetail); + + assertFalse(jobManager.isLayerComplete(layer)); + + JobDetail ujob = jobManager.findJobDetail(jobName); + LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); + assertEquals(expected, ulayer.getMinimumMemory()); + } + + @Test + @Transactional + @Rollback(true) + public void testMinMemIncrease() { + executeMinMemIncrease(6291456, false); + } + + @Test + @Transactional + @Rollback(true) + public void testMinMemIncreaseShowOverride() { + executeMinMemIncrease(10485760, true); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java index de67ff26a..9eabfb1fb 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -55,145 +51,119 @@ @ContextConfiguration public class HistoryControlTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - FrameCompleteHandler frameCompleteHandler; - - @Resource - HostManager hostManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - LayerDao layerDao; - - @Resource - Dispatcher dispatcher; - - private static final String HOSTNAME = "beta"; - private static final String DELETE_HISTORY = - "DELETE FROM frame_history; " + - "DELETE FROM job_history; "; - private static final String DISABLE_HISTORY = - "INSERT INTO " + - "config (pk_config,str_key) " + - "VALUES " + - "(uuid_generate_v1(),'DISABLE_HISTORY');"; - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch( - new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem((int) CueUtil.GB8) - .setFreeSwap(20760) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(CueUtil.GB8) - .setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(40) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setNumGpus(8) - .setFreeGpuMem(CueUtil.GB16 * 8) - .setTotalGpuMem(CueUtil.GB16 * 8) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - public void launchAndDeleteJob() { - launchJob(); - - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); - LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host); - VirtualProc proc = procs.get(0); - - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder() - .setFrame(info) - .setExitStatus(0) - .build(); - frameCompleteHandler.handleFrameCompleteReport(report); - - assertTrue(jobManager.isLayerComplete(layer)); - assertTrue(jobManager.isJobComplete(job)); - - jdbcTemplate.update("DELETE FROM job WHERE pk_job=?", job.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testEnabled() { - jdbcTemplate.update(DELETE_HISTORY); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM frame_history", Integer.class)); - - launchAndDeleteJob(); - - assertEquals(Integer.valueOf(5), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM frame_history", Integer.class)); - } - - @Test - @Transactional - @Rollback(true) - public void testDisabled() { - jdbcTemplate.update(DELETE_HISTORY); - jdbcTemplate.update(DISABLE_HISTORY); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM frame_history", Integer.class)); - - launchAndDeleteJob(); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM frame_history", Integer.class)); - } + @Resource + AdminManager adminManager; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + HostManager hostManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + LayerDao layerDao; + + @Resource + Dispatcher dispatcher; + + private static final String HOSTNAME = "beta"; + private static final String DELETE_HISTORY = + "DELETE FROM frame_history; " + "DELETE FROM job_history; "; + private static final String DISABLE_HISTORY = "INSERT INTO " + "config (pk_config,str_key) " + + "VALUES " + "(uuid_generate_v1(),'DISABLE_HISTORY');"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + public void launchJob() { + jobLauncher.testMode = true; 
+ jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(8) + .setFreeGpuMem(CueUtil.GB16 * 8).setTotalGpuMem(CueUtil.GB16 * 8).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + public void launchAndDeleteJob() { + launchJob(); + + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host); + VirtualProc proc = procs.get(0); + + RunningFrameInfo info = + RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); + + assertTrue(jobManager.isLayerComplete(layer)); + assertTrue(jobManager.isJobComplete(job)); + + jdbcTemplate.update("DELETE FROM job WHERE pk_job=?", job.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testEnabled() { + jdbcTemplate.update(DELETE_HISTORY); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + + launchAndDeleteJob(); + + assertEquals(Integer.valueOf(5), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + } + + @Test + @Transactional + @Rollback(true) + public void testDisabled() { + jdbcTemplate.update(DELETE_HISTORY); + jdbcTemplate.update(DISABLE_HISTORY); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + + launchAndDeleteJob(); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java index 120c620a1..f0c1ffe64 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under 
the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import javax.annotation.Resource; @@ -45,81 +41,58 @@ @ContextConfiguration public class HostReportHandlerGpuTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - HostReportHandler hostReportHandler; - - @Resource - Dispatcher dispatcher; - - private static final String HOSTNAME = "beta"; - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { - return CoreDetail.newBuilder() - .setTotalCores(total) - .setIdleCores(idle) - .setBookedCores(booked) - .setLockedCores(locked) - .build(); - } - - private DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - private static RenderHost getRenderHost() { - return RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(CueUtil.GB8) - .setFreeSwap(CueUtil.GB2) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(CueUtil.GB8) - .setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setNumGpus(64) - .setFreeGpuMem(1048576L * 2000) - .setTotalGpuMem(1048576L * 2048) - .build(); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReport() { - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = HostReport.newBuilder() - .setHost(getRenderHost()) - .setCoreInfo(cores) - .build(); - - hostReportHandler.handleHostReport(report, true); - DispatchHost host = getHost(); - assertEquals(host.lockState, LockState.OPEN); - assertEquals(host.memory, CueUtil.GB8 - 524288); - assertEquals(host.gpus, 64); - assertEquals(host.idleGpus, 64); - assertEquals(host.gpuMemory, 1048576L * 2048); - assertEquals(host.idleGpuMemory, 2147483648L); - } + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + HostReportHandler hostReportHandler; + + @Resource + Dispatcher dispatcher; + + private static final String HOSTNAME = "beta"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { + return CoreDetail.newBuilder().setTotalCores(total).setIdleCores(idle).setBookedCores(booked) + .setLockedCores(locked).build(); + } + + private DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + private static RenderHost getRenderHost() { + return RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setNumGpus(64).setFreeGpuMem(1048576L * 2000).setTotalGpuMem(1048576L * 2048).build(); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReport() { + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder().setHost(getRenderHost()).setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, true); + DispatchHost host = getHost(); + assertEquals(host.lockState, LockState.OPEN); + assertEquals(host.memory, CueUtil.GB8 - 524288); + assertEquals(host.gpus, 64); + assertEquals(host.idleGpus, 64); + assertEquals(host.gpuMemory, 1048576L * 2048); + assertEquals(host.idleGpuMemory, 2147483648L); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java index 40c83d68d..b37eca5b8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -68,574 +64,460 @@ @ContextConfiguration public class HostReportHandlerTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - HostReportHandler hostReportHandler; - - @Resource - FrameCompleteHandler frameCompleteHandler; - - @Resource - Dispatcher dispatcher; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - CommentManager commentManager; - - private static final String HOSTNAME = "beta"; - private static final String NEW_HOSTNAME = "gamma"; - private String hostname; - private String hostname2; - private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = "Host set to REPAIR for not having enough storage " + - "space on the temporary directory (mcp)"; - private static final String CUEBOT_COMMENT_USER = "cuebot"; - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - hostname = UUID.randomUUID().toString().substring(0, 8); - hostname2 = UUID.randomUUID().toString().substring(0, 8); - hostManager.createHost(getRenderHost(hostname), - adminManager.findAllocationDetail("spi","general")); - hostManager.createHost(getRenderHost(hostname2), - adminManager.findAllocationDetail("spi","general")); - } - - private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { - return CoreDetail.newBuilder() - .setTotalCores(total) - .setIdleCores(idle) - .setBookedCores(booked) - .setLockedCores(locked) - .build(); - } - - private DispatchHost getHost(String hostname) { - return hostManager.findDispatchHost(hostname); - } - - private static RenderHost.Builder getRenderHostBuilder(String hostname) { - return RenderHost.newBuilder() - .setName(hostname) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(CueUtil.GB8) - .setFreeSwap(CueUtil.GB2) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(CueUtil.GB8) - .setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(16) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .setNumGpus(0) - .setFreeGpuMem(0) - .setTotalGpuMem(0); - } - - private static RenderHost getRenderHost(String hostname) { - return getRenderHostBuilder(hostname).build(); - } - - private static RenderHost getNewRenderHost(String tags) { - return RenderHost.newBuilder() - .setName(NEW_HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(CueUtil.GB8) - .setFreeSwap(CueUtil.GB2) - .setLoad(0) - .setTotalMcp(195430) - .setTotalMem(CueUtil.GB8) - .setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags(tags) - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReport() throws InterruptedException { - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report1 = HostReport.newBuilder() - .setHost(getRenderHost(hostname)) - .setCoreInfo(cores) - .build(); - HostReport report2 = HostReport.newBuilder() - .setHost(getRenderHost(hostname2)) - .setCoreInfo(cores) - .build(); - HostReport report1_2 = HostReport.newBuilder() - .setHost(getRenderHost(hostname)) - .setCoreInfo(getCoreDetail(200, 200, 100, 0)) - .build(); - - hostReportHandler.handleHostReport(report1, false); - DispatchHost host = getHost(hostname); - assertEquals(LockState.OPEN, host.lockState); - assertEquals(HardwareState.UP, host.hardwareState); - hostReportHandler.handleHostReport(report1_2, false); - host = getHost(hostname); - assertEquals(HardwareState.UP, host.hardwareState); - - // Test Queue thread handling - ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - // Expecting results from a ThreadPool based class on JUnit is tricky - // A future test will be developed in the future to better address the behavior of - // this feature - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report2); // HOSTNAME2 - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1_2); // HOSTNAME - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithNewAllocation() { - FacilityInterface facility = adminManager.getFacility( - "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); - assertEquals(facility.getName(), "spi"); - - AllocationEntity detail = new AllocationEntity(); - detail.name = "test"; - detail.tag = "test"; - adminManager.createAllocation(facility, detail); - detail = adminManager.findAllocationDetail("spi", "test"); - - boolean isBoot = true; - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = HostReport.newBuilder() - .setHost(getNewRenderHost("test")) - .setCoreInfo(cores) - .build(); - - hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); - 
assertEquals(host.getAllocationId(), detail.id); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithExistentAllocation() { - AllocationEntity alloc = adminManager.getAllocationDetail( - "00000000-0000-0000-0000-000000000006"); - assertEquals(alloc.getName(), "spi.general"); - - boolean isBoot = true; - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = HostReport.newBuilder() - .setHost(getNewRenderHost("general")) - .setCoreInfo(cores) - .build(); - - hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); - assertEquals(host.getAllocationId(), alloc.id); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithNonExistentTags() { - AllocationEntity alloc = adminManager.getAllocationDetail( - "00000000-0000-0000-0000-000000000002"); - assertEquals(alloc.getName(), "lax.unassigned"); - - boolean isBoot = true; - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = HostReport.newBuilder() - .setHost(getNewRenderHost("nonexistent")) - .setCoreInfo(cores) - .build(); - - hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); - assertEquals(host.getAllocationId(), alloc.id); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithFullTemporaryDirectories() { - // Create CoreDetail - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - - /* - * Test 1: - * Precondition: - * - HardwareState=UP - * Action: - * - Receives a HostReport with less freeTempDir than the threshold - * (opencue.properties: min_available_temp_storage_percentage) - * Postcondition: - * - Host hardwareState changes to REPAIR - * - A comment is created with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and - * user=CUEBOT_COMMENT_USER - * */ - // Create HostReport with totalMcp=4GB and freeMcp=128MB - HostReport report1 = HostReport.newBuilder() - .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.MB128).build()) - .setCoreInfo(cores) - .build(); - // Call handleHostReport() => Create the comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the - // host's hardwareState to REPAIR - hostReportHandler.handleHostReport(report1, false); - // Get host - DispatchHost host = getHost(hostname); - // Get list of comments by host, user, and subject - List comments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, - SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check if there is 1 comment - assertEquals(comments.size(), 1); - // Get host comment - CommentDetail comment = comments.get(0); - // Check if the comment has the user = CUEBOT_COMMENT_USER - assertEquals(comment.user, CUEBOT_COMMENT_USER); - // Check if the comment has the subject = SUBJECT_COMMENT_FULL_TEMP_DIR - assertEquals(comment.subject, SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check host lock state - assertEquals(LockState.OPEN, host.lockState); - // Check if host hardware state is REPAIR - assertEquals(HardwareState.REPAIR, host.hardwareState); - // Test Queue thread handling - ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1); // HOSTNAME - - /* - * Test 2: - * Precondition: - * - HardwareState=REPAIR - * - There is a comment for the host with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and - * 
user=CUEBOT_COMMENT_USER - * Action: - * Receives a HostReport with more freeTempDir than the threshold - * (opencue.properties: min_available_temp_storage_percentage) - * Postcondition: - * - Host hardwareState changes to UP - * - Comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER gets deleted - * */ - // Set the host freeTempDir to the minimum size required = 1GB (1048576 KB) - HostReport report2 = HostReport.newBuilder() - .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()) - .setCoreInfo(cores) - .build(); - // Call handleHostReport() => Delete the comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the - // host's hardwareState to UP - hostReportHandler.handleHostReport(report2, false); - // Get host - host = getHost(hostname); - // Get list of comments by host, user, and subject - comments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, - SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check if there is no comment associated with the host - assertEquals(comments.size(), 0); - // Check host lock state - assertEquals(LockState.OPEN, host.lockState); - // Check if host hardware state is UP - assertEquals(HardwareState.UP, host.hardwareState); - // Test Queue thread handling - queue = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1); // HOSTNAME + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + HostReportHandler hostReportHandler; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + Dispatcher dispatcher; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + CommentManager commentManager; + + private static final String HOSTNAME = "beta"; + private static final String NEW_HOSTNAME = "gamma"; + private String hostname; + private String hostname2; + private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = + "Host set to REPAIR for not having enough storage " + + "space on the temporary directory (mcp)"; + private static final String CUEBOT_COMMENT_USER = "cuebot"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); } - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithHardwareStateRepairNotRelatedToFullTempDir() { - // Create CoreDetail - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - - /* - * Test if host.hardwareState == HardwareState.REPAIR - * (Not related to freeMcp < dispatcher.min_bookable_free_mcp_kb (opencue.properties)) - * - * - There is no comment with subject=SUBJECT_COMMENT_FULL_MCP_DIR and user=CUEBOT_COMMENT_USER associated with - * the host - * The host.hardwareState continue as HardwareState.REPAIR - * */ - // Create HostReport - HostReport report = HostReport.newBuilder() - .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()) - .setCoreInfo(cores) - .build(); - // Get host - DispatchHost host = getHost(hostname); - // Host's HardwareState set to REPAIR - hostManager.setHostState(host, HardwareState.REPAIR); - host.hardwareState = HardwareState.REPAIR; - // Get list of comments by host, user, and subject - List hostComments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, - SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check if there is no comment - assertEquals(hostComments.size(), 0); - // There is no comment to delete - boolean 
commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, - CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); - assertFalse(commentsDeleted); - // Call handleHostReport() - hostReportHandler.handleHostReport(report, false); - // Check host lock state - assertEquals(LockState.OPEN, host.lockState); - // Check if host hardware state is REPAIR - assertEquals(HardwareState.REPAIR, host.hardwareState); - // Test Queue thread handling - ThreadPoolExecutor queueThread = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - hostReportHandler.queueHostReport(report); // HOSTNAME - hostReportHandler.queueHostReport(report); // HOSTNAME - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAndLlu() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - long now = System.currentTimeMillis(); - - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .setLluTime(now / 1000) - .setMaxRss(420000) - .build(); - HostReport report = HostReport.newBuilder() - .setHost(getRenderHost(hostname)) - .setCoreInfo(cores) - .addFrames(info) - .build(); - - hostReportHandler.handleHostReport(report, false); - - FrameDetail frame = jobManager.getFrameDetail(proc.getFrameId()); - assertEquals(frame.dateLLU, new Timestamp(now / 1000 * 1000)); - assertEquals(420000, frame.maxRss); - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAggressionRss() { - jobLauncher.testMode = true; - dispatcher.setTestMode(true); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - // 1.6 = 1 + dispatcher.oom_frame_overboard_allowed_threshold - long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * 1.6); - - // Test rss overboard - RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .setRss(memoryOverboard) - .setMaxRss(memoryOverboard) - .build(); - HostReport report = HostReport.newBuilder() - .setHost(getRenderHost(hostname)) - .setCoreInfo(getCoreDetail(200, 200, 0, 0)) - .addFrames(info) - .build(); - - long killCount = DispatchSupport.killedOffenderProcs.get(); - hostReportHandler.handleHostReport(report, false); - assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAggressionMaxRss() { - jobLauncher.testMode = true; - dispatcher.setTestMode(true); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - // 0.6 = dispatcher.oom_frame_overboard_allowed_threshold - long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * - (1.0 + (2 * 0.6))); - - // Test rss>90% and maxRss overboard - 
RunningFrameInfo info = RunningFrameInfo.newBuilder() - .setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()) - .setResourceId(proc.getProcId()) - .setRss((long)Math.ceil(0.95 * proc.memoryReserved)) - .setMaxRss(memoryOverboard) - .build(); - HostReport report = HostReport.newBuilder() - .setHost(getRenderHost(hostname)) - .setCoreInfo(getCoreDetail(200, 200, 0, 0)) - .addFrames(info) - .build(); - - long killCount = DispatchSupport.killedOffenderProcs.get(); - hostReportHandler.handleHostReport(report, false); - assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAggressionMemoryWarning() { - jobLauncher.testMode = true; - dispatcher.setTestMode(true); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_multiple_frames.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(3, procs.size()); - VirtualProc proc1 = procs.get(0); - VirtualProc proc2 = procs.get(1); - VirtualProc proc3 = procs.get(2); - - // Ok - RunningFrameInfo info1 = RunningFrameInfo.newBuilder() - .setJobId(proc1.getJobId()) - .setLayerId(proc1.getLayerId()) - .setFrameId(proc1.getFrameId()) - .setResourceId(proc1.getProcId()) - .setUsedSwapMemory(CueUtil.MB512 - CueUtil.MB128) - .setVsize(CueUtil.GB2) - .setRss(CueUtil.GB2) - .setMaxRss(CueUtil.GB2) - .build(); - - // Overboard Rss - RunningFrameInfo info2 = RunningFrameInfo.newBuilder() - .setJobId(proc2.getJobId()) - .setLayerId(proc2.getLayerId()) - .setFrameId(proc2.getFrameId()) - .setResourceId(proc2.getProcId()) - .setUsedSwapMemory(CueUtil.MB512) - .setVsize(CueUtil.GB4) - .setRss(CueUtil.GB4) - .setMaxRss(CueUtil.GB4) - .build(); - - // Overboard Rss - long memoryUsedProc3 = CueUtil.GB8; - RunningFrameInfo info3 = RunningFrameInfo.newBuilder() - .setJobId(proc3.getJobId()) - .setLayerId(proc3.getLayerId()) - .setFrameId(proc3.getFrameId()) - .setResourceId(proc3.getProcId()) - .setUsedSwapMemory(CueUtil.MB512 * 2) - .setVsize(memoryUsedProc3) - .setRss(memoryUsedProc3) - .setMaxRss(memoryUsedProc3) - .build(); - - RenderHost hostAfterUpdate = getRenderHostBuilder(hostname) - .setFreeMem(0) - .setFreeSwap(CueUtil.GB2 - - info1.getUsedSwapMemory() - - info2.getUsedSwapMemory() - - info3.getUsedSwapMemory()) - .build(); - - HostReport report = HostReport.newBuilder() - .setHost(hostAfterUpdate) - .setCoreInfo(getCoreDetail(200, 200, 0, 0)) - .addAllFrames(Arrays.asList(info1, info2, info3)) - .build(); - - // Get layer state before report gets sent - LayerDetail layerBeforeIncrease = jobManager.getLayerDetail(proc3.getLayerId()); - - // In this case, killing 2 frames should be enough to ge the machine to a safe - // state. 
Total Swap: 2GB, usage before kill: 1944MB, usage after kill: 348 (less than 20%) - long killCount = DispatchSupport.killedOffenderProcs.get(); - hostReportHandler.handleHostReport(report, false); - assertEquals(killCount + 2, DispatchSupport.killedOffenderProcs.get()); - - // Confirm the frame will be set to retry after it's completion has been - // processed - - RunningFrameInfo runningFrame = RunningFrameInfo.newBuilder() - .setFrameId(proc3.getFrameId()) - .setFrameName("frame_name") - .setLayerId(proc3.getLayerId()) - .setRss(memoryUsedProc3) - .setMaxRss(memoryUsedProc3) - .setResourceId(proc3.id) - .build(); - FrameCompleteReport completeReport = FrameCompleteReport.newBuilder() - .setHost(hostAfterUpdate) - .setFrame(runningFrame) - .setExitSignal(9) - .setRunTime(1) - .setExitStatus(1) - .build(); - - frameCompleteHandler.handleFrameCompleteReport(completeReport); - FrameDetail killedFrame = jobManager.getFrameDetail(proc3.getFrameId()); - LayerDetail layer = jobManager.getLayerDetail(proc3.getLayerId()); - assertEquals(FrameState.WAITING, killedFrame.state); - // Memory increases are processed in two different places. - // First: proc.reserved + 2GB - // Second: the maximum reported proc.maxRss - // The higher valuer beween First and Second wins. - // In this case, proc.maxRss - assertEquals(Math.max(memoryUsedProc3, layerBeforeIncrease.getMinimumMemory() + CueUtil.GB2), - layer.getMinimumMemory()); - } -} + @Before + public void createHost() { + hostname = UUID.randomUUID().toString().substring(0, 8); + hostname2 = UUID.randomUUID().toString().substring(0, 8); + hostManager.createHost(getRenderHost(hostname), + adminManager.findAllocationDetail("spi", "general")); + hostManager.createHost(getRenderHost(hostname2), + adminManager.findAllocationDetail("spi", "general")); + } + + private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { + return CoreDetail.newBuilder().setTotalCores(total).setIdleCores(idle).setBookedCores(booked) + .setLockedCores(locked).build(); + } + + private DispatchHost getHost(String hostname) { + return hostManager.findDispatchHost(hostname); + } + + private static RenderHost.Builder getRenderHostBuilder(String hostname) { + return RenderHost.newBuilder().setName(hostname).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(16).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(0) + .setFreeGpuMem(0).setTotalGpuMem(0); + } + + private static RenderHost getRenderHost(String hostname) { + return getRenderHostBuilder(hostname).build(); + } + + private static RenderHost getNewRenderHost(String tags) { + return RenderHost.newBuilder().setName(NEW_HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
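+        // (RenderHost sizes are reported in KB, so CueUtil.GB (1048576 KB) below presumably
+        // represents 1 GB of free temporary storage, the minimum referenced in the
+        // full-temp-dir tests in this class.)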
+ .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) + .setTotalMcp(195430).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags(tags) + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) + .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)).build(); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReport() throws InterruptedException { + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report1 = + HostReport.newBuilder().setHost(getRenderHost(hostname)).setCoreInfo(cores).build(); + HostReport report2 = + HostReport.newBuilder().setHost(getRenderHost(hostname2)).setCoreInfo(cores).build(); + HostReport report1_2 = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 100, 0)).build(); + + hostReportHandler.handleHostReport(report1, false); + DispatchHost host = getHost(hostname); + assertEquals(LockState.OPEN, host.lockState); + assertEquals(HardwareState.UP, host.hardwareState); + hostReportHandler.handleHostReport(report1_2, false); + host = getHost(hostname); + assertEquals(HardwareState.UP, host.hardwareState); + + // Test Queue thread handling + ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + // Expecting results from a ThreadPool based class on JUnit is tricky + // A future test will be developed in the future to better address the behavior + // of + // this feature + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report2); // HOSTNAME2 + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1_2); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithNewAllocation() { + FacilityInterface facility = adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); + assertEquals(facility.getName(), "spi"); + + AllocationEntity detail = new AllocationEntity(); + detail.name = "test"; + detail.tag = "test"; + adminManager.createAllocation(facility, detail); + detail = adminManager.findAllocationDetail("spi", "test"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = + HostReport.newBuilder().setHost(getNewRenderHost("test")).setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), detail.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithExistentAllocation() { + AllocationEntity alloc = + adminManager.getAllocationDetail("00000000-0000-0000-0000-000000000006"); + assertEquals(alloc.getName(), "spi.general"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = + HostReport.newBuilder().setHost(getNewRenderHost("general")).setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), alloc.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithNonExistentTags() { + AllocationEntity alloc = + 
adminManager.getAllocationDetail("00000000-0000-0000-0000-000000000002"); + assertEquals(alloc.getName(), "lax.unassigned"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = + HostReport.newBuilder().setHost(getNewRenderHost("nonexistent")).setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), alloc.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithFullTemporaryDirectories() { + // Create CoreDetail + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + + /* + * Test 1: Precondition: - HardwareState=UP Action: - Receives a HostReport with less + * freeTempDir than the threshold (opencue.properties: min_available_temp_storage_percentage) + * Postcondition: - Host hardwareState changes to REPAIR - A comment is created with + * subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER + */ + // Create HostReport with totalMcp=4GB and freeMcp=128MB + HostReport report1 = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.MB128).build()) + .setCoreInfo(cores).build(); + // Call handleHostReport() => Create the comment with + // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the + // host's hardwareState to REPAIR + hostReportHandler.handleHostReport(report1, false); + // Get host + DispatchHost host = getHost(hostname); + // Get list of comments by host, user, and subject + List comments = commentManager.getCommentsByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is 1 comment + assertEquals(comments.size(), 1); + // Get host comment + CommentDetail comment = comments.get(0); + // Check if the comment has the user = CUEBOT_COMMENT_USER + assertEquals(comment.user, CUEBOT_COMMENT_USER); + // Check if the comment has the subject = SUBJECT_COMMENT_FULL_TEMP_DIR + assertEquals(comment.subject, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is REPAIR + assertEquals(HardwareState.REPAIR, host.hardwareState); + // Test Queue thread handling + ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + + /* + * Test 2: Precondition: - HardwareState=REPAIR - There is a comment for the host with + * subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER Action: Receives a + * HostReport with more freeTempDir than the threshold (opencue.properties: + * min_available_temp_storage_percentage) Postcondition: - Host hardwareState changes to UP - + * Comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER gets deleted + */ + // Set the host freeTempDir to the minimum size required = 1GB (1048576 KB) + HostReport report2 = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()).setCoreInfo(cores) + .build(); + // Call handleHostReport() => Delete the comment with + // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the + // host's hardwareState to UP + hostReportHandler.handleHostReport(report2, false); + // Get host + host = getHost(hostname); + // Get list of comments by host, user, and subject + comments = 
commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, + SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is no comment associated with the host + assertEquals(comments.size(), 0); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is UP + assertEquals(HardwareState.UP, host.hardwareState); + // Test Queue thread handling + queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithHardwareStateRepairNotRelatedToFullTempDir() { + // Create CoreDetail + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + + /* + * Test if host.hardwareState == HardwareState.REPAIR (Not related to freeMcp < + * dispatcher.min_bookable_free_mcp_kb (opencue.properties)) + * + * - There is no comment with subject=SUBJECT_COMMENT_FULL_MCP_DIR and user=CUEBOT_COMMENT_USER + * associated with the host The host.hardwareState continue as HardwareState.REPAIR + */ + // Create HostReport + HostReport report = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()).setCoreInfo(cores) + .build(); + // Get host + DispatchHost host = getHost(hostname); + // Host's HardwareState set to REPAIR + hostManager.setHostState(host, HardwareState.REPAIR); + host.hardwareState = HardwareState.REPAIR; + // Get list of comments by host, user, and subject + List hostComments = commentManager.getCommentsByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is no comment + assertEquals(hostComments.size(), 0); + // There is no comment to delete + boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + assertFalse(commentsDeleted); + // Call handleHostReport() + hostReportHandler.handleHostReport(report, false); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is REPAIR + assertEquals(HardwareState.REPAIR, host.hardwareState); + // Test Queue thread handling + ThreadPoolExecutor queueThread = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report); // HOSTNAME + hostReportHandler.queueHostReport(report); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAndLlu() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + long now = System.currentTimeMillis(); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()) + .setLluTime(now / 1000).setMaxRss(420000).build(); + HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)).setCoreInfo(cores) + .addFrames(info).build(); + + hostReportHandler.handleHostReport(report, false); + + FrameDetail frame = jobManager.getFrameDetail(proc.getFrameId()); + assertEquals(frame.dateLLU, new Timestamp(now / 1000 * 
1000)); + assertEquals(420000, frame.maxRss); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionRss() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + // 1.6 = 1 + dispatcher.oom_frame_overboard_allowed_threshold + long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * 1.6); + + // Test rss overboard + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()) + .setRss(memoryOverboard).setMaxRss(memoryOverboard).build(); + HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)).addFrames(info).build(); + + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionMaxRss() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + // 0.6 = dispatcher.oom_frame_overboard_allowed_threshold + long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * (1.0 + (2 * 0.6))); + + // Test rss>90% and maxRss overboard + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()) + .setRss((long) Math.ceil(0.95 * proc.memoryReserved)).setMaxRss(memoryOverboard).build(); + HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)).addFrames(info).build(); + + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionMemoryWarning() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_multiple_frames.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(3, procs.size()); + VirtualProc proc1 = procs.get(0); + VirtualProc proc2 = procs.get(1); + VirtualProc proc3 = procs.get(2); + + // Ok + RunningFrameInfo info1 = RunningFrameInfo.newBuilder().setJobId(proc1.getJobId()) + .setLayerId(proc1.getLayerId()).setFrameId(proc1.getFrameId()) + .setResourceId(proc1.getProcId()).setUsedSwapMemory(CueUtil.MB512 - CueUtil.MB128) + .setVsize(CueUtil.GB2).setRss(CueUtil.GB2).setMaxRss(CueUtil.GB2).build(); + + // Overboard Rss + RunningFrameInfo info2 = RunningFrameInfo.newBuilder().setJobId(proc2.getJobId()) + .setLayerId(proc2.getLayerId()).setFrameId(proc2.getFrameId()) + .setResourceId(proc2.getProcId()).setUsedSwapMemory(CueUtil.MB512).setVsize(CueUtil.GB4) + .setRss(CueUtil.GB4).setMaxRss(CueUtil.GB4).build(); + + // Overboard Rss + 
long memoryUsedProc3 = CueUtil.GB8;
+    RunningFrameInfo info3 = RunningFrameInfo.newBuilder().setJobId(proc3.getJobId())
+        .setLayerId(proc3.getLayerId()).setFrameId(proc3.getFrameId())
+        .setResourceId(proc3.getProcId()).setUsedSwapMemory(CueUtil.MB512 * 2)
+        .setVsize(memoryUsedProc3).setRss(memoryUsedProc3).setMaxRss(memoryUsedProc3).build();
+
+    RenderHost hostAfterUpdate = getRenderHostBuilder(hostname).setFreeMem(0)
+        .setFreeSwap(CueUtil.GB2 - info1.getUsedSwapMemory() - info2.getUsedSwapMemory()
+            - info3.getUsedSwapMemory())
+        .build();
+
+    HostReport report =
+        HostReport.newBuilder().setHost(hostAfterUpdate).setCoreInfo(getCoreDetail(200, 200, 0, 0))
+            .addAllFrames(Arrays.asList(info1, info2, info3)).build();
+
+    // Get layer state before report gets sent
+    LayerDetail layerBeforeIncrease = jobManager.getLayerDetail(proc3.getLayerId());
+
+    // In this case, killing 2 frames should be enough to get the machine to a safe
+    // state. Total Swap: 2GB, usage before kill: 1944MB, usage after kill: 348MB
+    // (less than 20%)
+    long killCount = DispatchSupport.killedOffenderProcs.get();
+    hostReportHandler.handleHostReport(report, false);
+    assertEquals(killCount + 2, DispatchSupport.killedOffenderProcs.get());
+
+    // Confirm the frame will be set to retry after its completion has been
+    // processed
+
+    RunningFrameInfo runningFrame = RunningFrameInfo.newBuilder().setFrameId(proc3.getFrameId())
+        .setFrameName("frame_name").setLayerId(proc3.getLayerId()).setRss(memoryUsedProc3)
+        .setMaxRss(memoryUsedProc3).setResourceId(proc3.id).build();
+    FrameCompleteReport completeReport = FrameCompleteReport.newBuilder().setHost(hostAfterUpdate)
+        .setFrame(runningFrame).setExitSignal(9).setRunTime(1).setExitStatus(1).build();
+
+    frameCompleteHandler.handleFrameCompleteReport(completeReport);
+    FrameDetail killedFrame = jobManager.getFrameDetail(proc3.getFrameId());
+    LayerDetail layer = jobManager.getLayerDetail(proc3.getLayerId());
+    assertEquals(FrameState.WAITING, killedFrame.state);
+    // Memory increases are processed in two different places.
+    // First: proc.reserved + 2GB
+    // Second: the maximum reported proc.maxRss
+    // The higher value between First and Second wins.
+    // In this case, proc.maxRss
+    assertEquals(Math.max(memoryUsedProc3, layerBeforeIncrease.getMinimumMemory() + CueUtil.GB2),
+        layer.getMinimumMemory());
+  }
+}
diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java
index a7218b47a..6d142ad8c 100644
--- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java
+++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java
@@ -2,20 +2,17 @@
 /*
  * Copyright Contributors to the OpenCue Project
  *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
+ * in compliance with the License. You may obtain a copy of the License at
 *
- *   http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
 *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -49,325 +46,290 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - @ContextConfiguration public class LocalDispatcherTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - Dispatcher localDispatcher; - - @Resource - BookingManager bookingManager; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = - "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = - "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - localDispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(0) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(400) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("test") - .putAttributes("SP_OS", "Linux") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostAutoDetectJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setThreads(1); - lja.setMaxMemory(CueUtil.GB8); - lja.setMaxCoreUnits(200); - bookingManager.createLocalHostAssignment(host, job, lja); - - List procs = localDispatcher.dispatchHost(host); - - // Should have 2 procs. - assertEquals(2, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - /* - * Check to ensure the procs are marked as local. - */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check to ensure the right job was booked. 
- */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.jobId.equals(job.getId()))); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostAutoDetectLayer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - - LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, layer, lba); - - List procs = localDispatcher.dispatchHost(host); - - // Should have 2 procs. - assertEquals(3, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - /* - * Check that they are all marked local. - */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check that they are all frame the same layer. - */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.layerId.equals(layer.getId()))); - - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostAutoDetectFrame() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, frame, lba); - - List procs = localDispatcher.dispatchHost(host); - - /* - * Should always be 1 or 0, in this case it should be 1. - */ - assertEquals(1, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - /* - * Check the frame id. - */ - assertEquals(frame.getFrameId(), procs.get(0).frameId); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, job, lba); - - List procs = localDispatcher.dispatchHost(host, job); - - // Should have 2 procs. - assertEquals(2, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - // Check that they are local. - assertTrue(procs - .stream() - .allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check to ensure the right job was booked. - */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.jobId.equals(job.getId()))); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalLayer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - - LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, layer, lba); - - List procs = localDispatcher.dispatchHost(host, layer); - - // Should have 2 procs. - assertEquals(3, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - /* - * Check that they are all marked local. - */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check that they are all frame the same layer. 
- */ - assertTrue(procs - .stream() - .allMatch(proc -> proc.layerId.equals(layer.getId()))); - - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalFrame() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, frame, lba); - - List procs = localDispatcher.dispatchHost(host, frame); - - /* - * Should always be 1 or 0 procs, in this case 1. - */ - assertEquals(1, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - /* - * Check the frame id. - */ - assertEquals(frame.getFrameId(), procs.get(0).frameId); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalFrameTwice() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, frame, lba); - - List procs = localDispatcher.dispatchHost(host, frame); - - /* - * Should always be 1 or 0 procs, in this case 1. - */ - assertEquals(1, procs.size()); - - /* - * Dispatch again. - */ - procs = localDispatcher.dispatchHost(host, frame); - - /* - * Should always be 1 or 0 procs, in this case 0. - */ - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalJobDeficit() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, job, lba); - - List procs = localDispatcher.dispatchHost(host, job); - - // Should have 1 proc. - assertEquals(1, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - // Check that they are local. - assertTrue(procs.get(0).isLocalDispatch); - /* - * Check to ensure the right job was booked. - */ - assertEquals(job.getJobId(), procs.get(0).jobId); - - /* - * Now, lower our min cores to create a deficit. - */ - assertFalse(bookingManager.hasResourceDeficit(host)); - bookingManager.setMaxResources(lba, 700, 0, 0, 0); - assertTrue(bookingManager.hasResourceDeficit(host)); - } -} + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + @Resource + AdminManager adminManager; + + @Resource + Dispatcher localDispatcher; + + @Resource + BookingManager bookingManager; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + localDispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(2).setCoresPerProc(400).setState(HardwareState.UP).setFacility("spi") + .addTags("test").putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostAutoDetectJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setThreads(1); + lja.setMaxMemory(CueUtil.GB8); + lja.setMaxCoreUnits(200); + bookingManager.createLocalHostAssignment(host, job, lja); + + List procs = localDispatcher.dispatchHost(host); + + // Should have 2 procs. + assertEquals(2, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + /* + * Check to ensure the procs are marked as local. + */ + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check to ensure the right job was booked. + */ + assertTrue(procs.stream().allMatch(proc -> proc.jobId.equals(job.getId()))); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostAutoDetectLayer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + + LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, layer, lba); + + List procs = localDispatcher.dispatchHost(host); + + // Should have 2 procs. + assertEquals(3, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + /* + * Check that they are all marked local. + */ + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check that they are all frame the same layer. + */ + assertTrue(procs.stream().allMatch(proc -> proc.layerId.equals(layer.getId()))); + + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostAutoDetectFrame() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, frame, lba); + + List procs = localDispatcher.dispatchHost(host); + + /* + * Should always be 1 or 0, in this case it should be 1. + */ + assertEquals(1, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + /* + * Check the frame id. + */ + assertEquals(frame.getFrameId(), procs.get(0).frameId); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, job, lba); + + List procs = localDispatcher.dispatchHost(host, job); + + // Should have 2 procs. 
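+    // (The LocalHostAssignment above caps the job at 200 core units, booked one 100-unit
+    // thread at a time, which presumably yields the two local procs asserted here.)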
+ assertEquals(2, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + // Check that they are local. + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check to ensure the right job was booked. + */ + assertTrue(procs.stream().allMatch(proc -> proc.jobId.equals(job.getId()))); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalLayer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + + LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, layer, lba); + + List procs = localDispatcher.dispatchHost(host, layer); + + // Should have 2 procs. + assertEquals(3, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + /* + * Check that they are all marked local. + */ + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check that they are all frame the same layer. + */ + assertTrue(procs.stream().allMatch(proc -> proc.layerId.equals(layer.getId()))); + + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalFrame() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, frame, lba); + + List procs = localDispatcher.dispatchHost(host, frame); + + /* + * Should always be 1 or 0 procs, in this case 1. + */ + assertEquals(1, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + /* + * Check the frame id. + */ + assertEquals(frame.getFrameId(), procs.get(0).frameId); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalFrameTwice() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, frame, lba); + + List procs = localDispatcher.dispatchHost(host, frame); + + /* + * Should always be 1 or 0 procs, in this case 1. + */ + assertEquals(1, procs.size()); + + /* + * Dispatch again. + */ + procs = localDispatcher.dispatchHost(host, frame); + + /* + * Should always be 1 or 0 procs, in this case 0. + */ + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalJobDeficit() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, job, lba); + + List procs = localDispatcher.dispatchHost(host, job); + + // Should have 1 proc. + assertEquals(1, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + // Check that they are local. + assertTrue(procs.get(0).isLocalDispatch); + /* + * Check to ensure the right job was booked. + */ + assertEquals(job.getJobId(), procs.get(0).jobId); + + /* + * Now, lower our min cores to create a deficit. 
+ */ + assertFalse(bookingManager.hasResourceDeficit(host)); + bookingManager.setMaxResources(lba, 700, 0, 0, 0); + assertTrue(bookingManager.hasResourceDeficit(host)); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java index 70e3db4af..d58b589bb 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -69,406 +66,352 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; - /** * Tests for the redirect manager. 
*/ @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class RedirectManagerTests - extends AbstractTransactionalJUnit4SpringContextTests { +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class RedirectManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - RedirectManager redirectManager; + @Resource + RedirectManager redirectManager; - @Resource - RedirectService redirectService; + @Resource + RedirectService redirectService; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Resource - ProcDao procDao; + @Resource + ProcDao procDao; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - Whiteboard whiteboard; - - @Resource - ProcSearchFactory procSearchFactory; + @Resource + Whiteboard whiteboard; - private static final String HOSTNAME = "beta"; + @Resource + ProcSearchFactory procSearchFactory; - private static final String JOBNAME = - "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String HOSTNAME = "beta"; - private static final String TARGET_JOB = - "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("test") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
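+        // (This host is registered with a single proc of 100 core units, so each dispatch in
+        // these redirect tests presumably books at most one VirtualProc, matching the
+        // single-proc assertions below.)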
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(1).setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .addTags("test").putAttributes("SP_OS", "Linux").build(); - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } - @Test - @Transactional - @Rollback(true) - public void testAddJobRedirectByCriteria() { + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } - JobDetail job = getJob(); - DispatchHost host = getHost(); + @Test + @Transactional + @Rollback(true) + public void testAddJobRedirectByCriteria() { - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + JobDetail job = getJob(); + DispatchHost host = getHost(); - /* Setup a proc search */ - ProcSearchInterface search = procSearchFactory.create(); - ProcSearchCriteria criteria = search.getCriteria(); - search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - List jobs = new ArrayList(1); - jobs.add(jobManager.findJob(TARGET_JOB)); + /* Setup a proc search */ + ProcSearchInterface search = procSearchFactory.create(); + ProcSearchCriteria criteria = search.getCriteria(); + search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); - /* Now redirect this proc to the other job */ - redirectManager.addRedirect( - search.getCriteria(), - jobs, - false, - new Source()); + List jobs = new ArrayList(1); + jobs.add(jobManager.findJob(TARGET_JOB)); - /* Test that the redirect was added properly. */ - assertTrue(redirectManager.hasRedirect(procs.get(0))); + /* Now redirect this proc to the other job */ + redirectManager.addRedirect(search.getCriteria(), jobs, false, new Source()); - /* Check to ensure the redirect target was set. */ - assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + /* Test that the redirect was added properly. */ + assertTrue(redirectManager.hasRedirect(procs.get(0))); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat( - whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), - is(emptyString())); - } + /* Check to ensure the redirect target was set. 
*/ + assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - @Test - @Transactional - @Rollback(true) - public void testAddGroupRedirectByCriteria() { + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - JobDetail job = getJob(); - DispatchHost host = getHost(); + @Test + @Transactional + @Rollback(true) + public void testAddGroupRedirectByCriteria() { - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + JobDetail job = getJob(); + DispatchHost host = getHost(); - // Double check there is a proc. - procDao.getVirtualProc(proc.getId()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - /* Setup a proc search */ - ProcSearchInterface search = procSearchFactory.create(); - ProcSearchCriteria criteria = search.getCriteria(); - search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); + // Double check there is a proc. + procDao.getVirtualProc(proc.getId()); - GroupInterface root = groupManager.getRootGroupDetail(job); - GroupDetail group = new GroupDetail(); - group.name = "Foo"; - group.showId = root.getShowId(); + /* Setup a proc search */ + ProcSearchInterface search = procSearchFactory.create(); + ProcSearchCriteria criteria = search.getCriteria(); + search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); - groupManager.createGroup(group, root); + GroupInterface root = groupManager.getRootGroupDetail(job); + GroupDetail group = new GroupDetail(); + group.name = "Foo"; + group.showId = root.getShowId(); - /* Now redirect this proc to the other job */ - redirectManager.addRedirect( - search.getCriteria(), - group, - false, - new Source()); + groupManager.createGroup(group, root); - /* Test that the redirect was added properly. */ - assertTrue(redirectManager.hasRedirect(procs.get(0))); + /* Now redirect this proc to the other job */ + redirectManager.addRedirect(search.getCriteria(), group, false, new Source()); - /* Check to ensure the redirect target was set. */ - assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + /* Test that the redirect was added properly. */ + assertTrue(redirectManager.hasRedirect(procs.get(0))); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat( - whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), - is(emptyString())); - } + /* Check to ensure the redirect target was set. 
*/ + assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - @Test - @Transactional - @Rollback(true) - public void testAddJobRedirect() { + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + @Test + @Transactional + @Rollback(true) + public void testAddJobRedirect() { - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertTrue(redirectManager.addRedirect(proc, target, - false, new Source())); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + assertTrue(redirectManager.addRedirect(proc, target, false, new Source())); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat( - whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), - is(emptyString())); - } + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - @Test - @Transactional - @Rollback(true) - public void testAddGroupRedirect() { + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + @Test + @Transactional + @Rollback(true) + public void testAddGroupRedirect() { - /* Find the root group and move our target job there. */ - GroupDetail group = groupManager.getRootGroupDetail(job); - groupManager.reparentJob(target, - group, - new Inherit[] { }); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - assertEquals(group.getId(), - groupManager.getGroupDetail(target).getId()); + /* Find the root group and move our target job there. 
*/ + GroupDetail group = groupManager.getRootGroupDetail(job); + groupManager.reparentJob(target, group, new Inherit[] {}); - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + assertEquals(group.getId(), groupManager.getGroupDetail(target).getId()); - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); + assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat( - whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), - is(emptyString())); - } + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - @Test - @Transactional - @Rollback(true) - public void testJobRedirect() { + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + @Test + @Transactional + @Rollback(true) + public void testJobRedirect() { - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertTrue(redirectManager.addRedirect(proc, target, - false, new Source())); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + assertTrue(redirectManager.addRedirect(proc, target, false, new Source())); - assertTrue(redirectManager.redirect(proc)); + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - assertEquals( - Convert.coreUnitsToCores(100), - whiteboard.getJob(target.getJobId()).getJobStats().getReservedCores(), - 0); - } + assertTrue(redirectManager.redirect(proc)); - @Test - @Transactional - @Rollback(true) - public void testGroupRedirect() { + assertEquals(Convert.coreUnitsToCores(100), + whiteboard.getJob(target.getJobId()).getJobStats().getReservedCores(), 0); + } - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = 
getHost(); + @Test + @Transactional + @Rollback(true) + public void testGroupRedirect() { - /* Find the root group and move our target job there. */ - GroupDetail group = groupManager.getRootGroupDetail(job); - groupManager.reparentJob(target, - group, - new Inherit[] { }); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - assertEquals(group.getId(), - groupManager.getGroupDetail(target).getId()); + /* Find the root group and move our target job there. */ + GroupDetail group = groupManager.getRootGroupDetail(job); + groupManager.reparentJob(target, group, new Inherit[] {}); - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + assertEquals(group.getId(), groupManager.getGroupDetail(target).getId()); - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - assertTrue(redirectManager.addRedirect(proc, group, - false, new Source())); + assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals( - group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); - redirectManager.redirect(proc); + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - assertEquals( - Convert.coreUnitsToCores(100), - whiteboard.getGroup(group.getGroupId()).getGroupStats().getReservedCores(), - 0); - } + redirectManager.redirect(proc); - @Test - @Transactional - @Rollback(true) - public void testNonExistentRedirect() { - JobDetail job = getJob(); - DispatchHost host = getHost(); + assertEquals(Convert.coreUnitsToCores(100), + whiteboard.getGroup(group.getGroupId()).getGroupStats().getReservedCores(), 0); + } - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + @Test + @Transactional + @Rollback(true) + public void testNonExistentRedirect() { + JobDetail job = getJob(); + DispatchHost host = getHost(); - assertFalse(redirectManager.hasRedirect(proc)); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - // This should not throw any exception. - assertFalse(redirectManager.redirect(proc)); - } + assertFalse(redirectManager.hasRedirect(proc)); - /** - * Test that parallel attempts to save a redirect with the - * same key succeed without throwing an exception. 
- */ - @Test - @Transactional - @Rollback(true) - public void testParallelPuts() { - final int N = 20; - - CountDownLatch startSignal = new CountDownLatch(1); - CountDownLatch stopSignal = new CountDownLatch(N); - - final String redirect_key = "test"; - - Redirect redirect = new Redirect(RedirectType.JOB_REDIRECT, "foo", "bar"); - - for (int i = 0; i < N; i++) { - new Thread(new Runnable() { - @Override - public void run() { - try { - try { - startSignal.await(); - } - catch (InterruptedException e) { - throw new RuntimeException("Failed to wait for start signal", e); - } - - // This should not throw anything... - redirectService.put(redirect_key, redirect); - } - finally { - stopSignal.countDown(); - } - } - }).start(); - } + // This should not throw any exception. + assertFalse(redirectManager.redirect(proc)); + } + + /** + * Test that parallel attempts to save a redirect with the same key succeed without throwing an + * exception. + */ + @Test + @Transactional + @Rollback(true) + public void testParallelPuts() { + final int N = 20; + + CountDownLatch startSignal = new CountDownLatch(1); + CountDownLatch stopSignal = new CountDownLatch(N); + + final String redirect_key = "test"; + + Redirect redirect = new Redirect(RedirectType.JOB_REDIRECT, "foo", "bar"); - // Start all the threads at roughly the same time. - try { - startSignal.countDown(); + for (int i = 0; i < N; i++) { + new Thread(new Runnable() { + @Override + public void run() { + try { try { - stopSignal.await(); + startSignal.await(); + } catch (InterruptedException e) { + throw new RuntimeException("Failed to wait for start signal", e); } - catch (InterruptedException e) { - throw new RuntimeException("Failed to wait for stop signal", e); - } - } - finally { - // Clean up after test. - redirectService.remove(redirect_key); + + // This should not throw anything... + redirectService.put(redirect_key, redirect); + } finally { + stopSignal.countDown(); + } } + }).start(); } -} + // Start all the threads at roughly the same time. + try { + startSignal.countDown(); + try { + stopSignal.await(); + } catch (InterruptedException e) { + throw new RuntimeException("Failed to wait for stop signal", e); + } + } finally { + // Clean up after test. + redirectService.remove(redirect_key); + } + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java index 7d02d44e8..588e11506 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import java.io.File; @@ -51,99 +47,82 @@ @ContextConfiguration public class StrandedCoreTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = - "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = - "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(200) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("test") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void dispatchStrandedCores() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - dispatchSupport.strandCores(host, 200); - List procs = dispatcher.dispatchHost(host, job); - assertTrue("No procs were booked by the dispatcher.", procs.size() > 0); - assertEquals(400, procs.get(0).coresReserved); - } + @Resource + JobManager jobManager; -} + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free 
space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(2).setCoresPerProc(200).setState(HardwareState.UP).setFacility("spi") + .addTags("test").putAttributes("SP_OS", "Linux").build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void dispatchStrandedCores() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + dispatchSupport.strandCores(host, 200); + List procs = dispatcher.dispatchHost(host, job); + assertTrue("No procs were booked by the dispatcher.", procs.size() > 0); + assertEquals(400, procs.get(0).coresReserved); + } + +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java index 16b4b23a4..a51fec72e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.dispatcher; import javax.annotation.Resource; @@ -44,83 +40,66 @@ import com.imageworks.spcue.util.CueUtil; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class TestBookingQueue extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - HostDao hostDao; - - @Resource - Dispatcher dispatcher; - - @Resource - HostManager hostManager; - - @Resource - BookingQueue bookingQueue; - - @Autowired - Environment env; - - private static final String HOSTNAME = "beta"; - - @Before - public void create() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addAllTags(ImmutableList.of("mcore", "4core", "8g")) - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostManager.createHost(host); - } - - @Test - @Transactional - @Rollback(true) - public void testBookingQueue() { - - int healthThreshold = 10; - int minUnhealthyPeriodMin = 3; - int queueCapacity = 2000; - int corePoolSize = 10; - int maxPoolSize = 14; - - DispatchHost host1 = hostDao.findDispatchHost(HOSTNAME); - host1.idleCores = 500; - DispatchHost host2 = hostDao.findDispatchHost(HOSTNAME); - DispatchHost host3 = hostDao.findDispatchHost(HOSTNAME); - BookingQueue queue = new BookingQueue(healthThreshold, minUnhealthyPeriodMin, queueCapacity, - corePoolSize, maxPoolSize); - bookingQueue.execute(new DispatchBookHost(host2,dispatcher, env)); - bookingQueue.execute(new DispatchBookHost(host3,dispatcher, env)); - bookingQueue.execute(new DispatchBookHost(host1,dispatcher, env)); - try { - Thread.sleep(10000); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - + @Resource + HostDao hostDao; + + @Resource + Dispatcher dispatcher; + + @Resource + HostManager hostManager; + + @Resource + BookingQueue bookingQueue; + + @Autowired + Environment env; + + private static final String HOSTNAME = "beta"; + + @Before + public void create() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) + .setNumProcs(1).setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .addAllTags(ImmutableList.of("mcore", "4core", "8g")).setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host); + } + + @Test + @Transactional + @Rollback(true) + public void testBookingQueue() { + + int healthThreshold = 10; + int minUnhealthyPeriodMin = 3; + int queueCapacity = 2000; + int corePoolSize = 10; + int maxPoolSize = 14; + + DispatchHost host1 = hostDao.findDispatchHost(HOSTNAME); + host1.idleCores = 500; + DispatchHost host2 = hostDao.findDispatchHost(HOSTNAME); + DispatchHost host3 = hostDao.findDispatchHost(HOSTNAME); + BookingQueue queue = new BookingQueue(healthThreshold, minUnhealthyPeriodMin, queueCapacity, + corePoolSize, maxPoolSize); + bookingQueue.execute(new DispatchBookHost(host2, dispatcher, env)); + bookingQueue.execute(new DispatchBookHost(host3, dispatcher, env)); + bookingQueue.execute(new DispatchBookHost(host1, dispatcher, env)); + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); } - - + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java index 3ac9bcc93..6db8d7119 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java @@ -2,36 +2,29 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.servant; import io.grpc.stub.StreamObserver; public class FakeStreamObserver implements StreamObserver { - @Override - public void onNext(T value) { - } + @Override + public void onNext(T value) {} - @Override - public void onError(Throwable t) { - } + @Override + public void onError(Throwable t) {} - @Override - public void onCompleted() { - } + @Override + public void onCompleted() {} } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java index ed2a83cd6..85d9eeb64 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.servant; import javax.annotation.Resource; @@ -47,108 +44,88 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ManageAllocationTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - AllocationDao allocationDao; - - @Resource - FacilityDao facilityDao; - - @Resource - ManageAllocation manageAllocation; - - @Test - @Transactional - @Rollback(true) - public void testCreate() { - Facility facility = Facility.newBuilder() - .setName(facilityDao.getFacility("spi").getName()) - .build(); - - // Use . name - AllocCreateRequest request = AllocCreateRequest.newBuilder() - .setName("spi.test_tag") - .setTag("test_tag") - .setFacility(facility) - .build(); - - FakeStreamObserver responseObserver = - new FakeStreamObserver(); - manageAllocation.create(request, responseObserver); - - allocationDao.findAllocationEntity("spi", "test_tag"); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ManageAllocationTests extends AbstractTransactionalJUnit4SpringContextTests { - @Test - @Transactional - @Rollback(true) - public void testDelete() { - Facility facility = Facility.newBuilder() - .setName(facilityDao.getFacility("spi").getName()) - .build(); - - // Non . name should work too. 
- AllocCreateRequest createRequest = AllocCreateRequest.newBuilder() - .setName("test_tag") - .setTag("test_tag") - .setFacility(facility) - .build(); - - FakeStreamObserver createResponseObserver = - new FakeStreamObserver(); - manageAllocation.create(createRequest, createResponseObserver); - - Allocation allocation = Allocation.newBuilder() - .setName("spi.test_tag") - .setTag("test_tag") - .setFacility("spi") - .build(); - - AllocDeleteRequest deleteRequest = AllocDeleteRequest.newBuilder() - .setAllocation(allocation) - .build(); - - FakeStreamObserver deleteResponseObserver = - new FakeStreamObserver(); - - manageAllocation.delete(deleteRequest, deleteResponseObserver); - - try { - allocationDao.findAllocationEntity("spi", "test_tag"); - fail("Expected exception"); - } catch (EmptyResultDataAccessException e) { - assertEquals(e.getMessage(), - "Incorrect result size: expected 1, actual 0"); - } - } + @Resource + AllocationDao allocationDao; - @Test - @Transactional - @Rollback(true) - public void testSetDefault() { - AllocationEntity alloc = allocationDao.getDefaultAllocationEntity(); - assertEquals(alloc.getName(), "lax.unassigned"); - - Allocation allocation = Allocation.newBuilder() - .setName("spi.general") - .setTag("general") - .setFacility("spi") - .build(); - AllocSetDefaultRequest request = AllocSetDefaultRequest.newBuilder() - .setAllocation(allocation) - .build(); - - FakeStreamObserver observer = - new FakeStreamObserver(); - manageAllocation.setDefault(request, observer); - - alloc = allocationDao.getDefaultAllocationEntity(); - assertEquals(alloc.getName(), "spi.general"); - } -} + @Resource + FacilityDao facilityDao; + + @Resource + ManageAllocation manageAllocation; + + @Test + @Transactional + @Rollback(true) + public void testCreate() { + Facility facility = + Facility.newBuilder().setName(facilityDao.getFacility("spi").getName()).build(); + + // Use . name + AllocCreateRequest request = AllocCreateRequest.newBuilder().setName("spi.test_tag") + .setTag("test_tag").setFacility(facility).build(); + + FakeStreamObserver responseObserver = + new FakeStreamObserver(); + manageAllocation.create(request, responseObserver); + allocationDao.findAllocationEntity("spi", "test_tag"); + } + @Test + @Transactional + @Rollback(true) + public void testDelete() { + Facility facility = + Facility.newBuilder().setName(facilityDao.getFacility("spi").getName()).build(); + + // Non . name should work too. 
+ AllocCreateRequest createRequest = AllocCreateRequest.newBuilder().setName("test_tag") + .setTag("test_tag").setFacility(facility).build(); + + FakeStreamObserver createResponseObserver = + new FakeStreamObserver(); + manageAllocation.create(createRequest, createResponseObserver); + + Allocation allocation = Allocation.newBuilder().setName("spi.test_tag").setTag("test_tag") + .setFacility("spi").build(); + + AllocDeleteRequest deleteRequest = + AllocDeleteRequest.newBuilder().setAllocation(allocation).build(); + + FakeStreamObserver deleteResponseObserver = + new FakeStreamObserver(); + + manageAllocation.delete(deleteRequest, deleteResponseObserver); + + try { + allocationDao.findAllocationEntity("spi", "test_tag"); + fail("Expected exception"); + } catch (EmptyResultDataAccessException e) { + assertEquals(e.getMessage(), "Incorrect result size: expected 1, actual 0"); + } + } + + @Test + @Transactional + @Rollback(true) + public void testSetDefault() { + AllocationEntity alloc = allocationDao.getDefaultAllocationEntity(); + assertEquals(alloc.getName(), "lax.unassigned"); + + Allocation allocation = + Allocation.newBuilder().setName("spi.general").setTag("general").setFacility("spi").build(); + AllocSetDefaultRequest request = + AllocSetDefaultRequest.newBuilder().setAllocation(allocation).build(); + + FakeStreamObserver observer = + new FakeStreamObserver(); + manageAllocation.setDefault(request, observer); + + alloc = allocationDao.getDefaultAllocationEntity(); + assertEquals(alloc.getName(), "spi.general"); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java index db9720406..d9890e894 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.servant; import javax.annotation.Resource; @@ -41,93 +38,80 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; - @Transactional -@ContextConfiguration(classes= TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ManageFrameTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - FrameDao frameDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - ManageFrame manageFrame; - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, - int red, int green, int blue) { - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(state) - .setText(text) - .setColor(FrameStateDisplayOverride.RGB.newBuilder() - .setRed(red) - .setGreen(green) - .setBlue(blue) - .build()) - .build(); - - return override; - } - @Test - @Transactional - @Rollback(true) - public void testFrameStateOverride() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - Frame jobFrame = Frame.newBuilder() - .setId(frame.getFrameId()) - .setName(frame.getName()) - .setState(frame.state) - .build(); - - //create initial override - FrameStateDisplayOverride override = createFrameStateDisplayOverride(FrameState.SUCCEEDED, - "FINISHED", 200, 200, 123); - FrameStateDisplayOverrideRequest req = FrameStateDisplayOverrideRequest.newBuilder() - .setFrame(jobFrame) - .setOverride(override) - .build(); - FakeStreamObserver responseObserver = - new FakeStreamObserver(); - manageFrame.setFrameStateDisplayOverride(req, responseObserver); - FrameStateDisplayOverrideSeq results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - - //try to create same override - manageFrame.setFrameStateDisplayOverride(req, responseObserver); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - - //try to update override text - FrameStateDisplayOverride overrideUpdated = createFrameStateDisplayOverride(FrameState.SUCCEEDED, - "DONE", 200, 200, 123); - FrameStateDisplayOverrideRequest reqUpdated = FrameStateDisplayOverrideRequest.newBuilder() - .setFrame(jobFrame) - .setOverride(overrideUpdated) - .build(); - manageFrame.setFrameStateDisplayOverride(reqUpdated, responseObserver); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - assertEquals(overrideUpdated, results.getOverridesList().get(0)); - - //add a new override - FrameStateDisplayOverride overrideNew = createFrameStateDisplayOverride(FrameState.EATEN, - "NOMNOM", 120, 50, 123); - FrameStateDisplayOverrideRequest reqNew = FrameStateDisplayOverrideRequest.newBuilder() - .setFrame(jobFrame) - .setOverride(overrideNew) - .build(); - manageFrame.setFrameStateDisplayOverride(reqNew, responseObserver); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(2, results.getOverridesCount()); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ManageFrameTests extends AbstractTransactionalJUnit4SpringContextTests { + 
@Resource + FrameDao frameDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + ManageFrame manageFrame; + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, + int red, int green, int blue) { + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder().setState(state) + .setText(text).setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(red) + .setGreen(green).setBlue(blue).build()) + .build(); + + return override; + } + + @Test + @Transactional + @Rollback(true) + public void testFrameStateOverride() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + Frame jobFrame = Frame.newBuilder().setId(frame.getFrameId()).setName(frame.getName()) + .setState(frame.state).build(); + + // create initial override + FrameStateDisplayOverride override = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "FINISHED", 200, 200, 123); + FrameStateDisplayOverrideRequest req = FrameStateDisplayOverrideRequest.newBuilder() + .setFrame(jobFrame).setOverride(override).build(); + FakeStreamObserver responseObserver = + new FakeStreamObserver(); + manageFrame.setFrameStateDisplayOverride(req, responseObserver); + FrameStateDisplayOverrideSeq results = + frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + + // try to create same override + manageFrame.setFrameStateDisplayOverride(req, responseObserver); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + + // try to update override text + FrameStateDisplayOverride overrideUpdated = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "DONE", 200, 200, 123); + FrameStateDisplayOverrideRequest reqUpdated = FrameStateDisplayOverrideRequest.newBuilder() + .setFrame(jobFrame).setOverride(overrideUpdated).build(); + manageFrame.setFrameStateDisplayOverride(reqUpdated, responseObserver); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + assertEquals(overrideUpdated, results.getOverridesList().get(0)); + + // add a new override + FrameStateDisplayOverride overrideNew = + createFrameStateDisplayOverride(FrameState.EATEN, "NOMNOM", 120, 50, 123); + FrameStateDisplayOverrideRequest reqNew = FrameStateDisplayOverrideRequest.newBuilder() + .setFrame(jobFrame).setOverride(overrideNew).build(); + manageFrame.setFrameStateDisplayOverride(reqNew, responseObserver); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(2, results.getOverridesCount()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java index c4538f79f..454eeef81 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.service; import javax.annotation.Resource; @@ -41,148 +37,146 @@ import static org.junit.Assert.fail; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class AdminManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - AdminManager adminManager; - - @Resource - FacilityDao facilityDao; - - @Resource - ShowDao showDao; - - private static final String TEST_ALLOC_NAME = "testAlloc"; - - @Test - @Transactional - @Rollback(true) - public void createAllocation() { - AllocationEntity a = new AllocationEntity(); - a.name = TEST_ALLOC_NAME; - a.tag = "general"; - adminManager.createAllocation(facilityDao.getDefaultFacility(), a); - } - - @Test - @Transactional - @Rollback(true) - public void deleteAllocation() { - AllocationEntity a = new AllocationEntity(); - a.name = facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME; - a.tag = "general"; - adminManager.createAllocation(facilityDao.getDefaultFacility(), a); - adminManager.deleteAllocation(a); - } - - @Test - @Transactional - @Rollback(true) - public void setDefaultAllocation() { - AllocationEntity a = adminManager.getDefaultAllocation(); - assertEquals(a.name, facilityDao.getDefaultFacility().getName() + ".unassigned"); - - a = new AllocationEntity(); - a.name = TEST_ALLOC_NAME; - a.tag = "general"; - adminManager.createAllocation(facilityDao.getDefaultFacility(), a); - adminManager.setDefaultAllocation(a); - - a = adminManager.getDefaultAllocation(); - assertEquals(a.name, facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void createShow() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - ShowEntity result = showDao.findShowDetail(show.name); - assertEquals(result.name, show.name); - } - - @Test - @Transactional - @Rollback(true) - public void createInvalidShow() { - ShowEntity show = new ShowEntity(); - show.name = "test/test"; - try { - adminManager.createShow(show); - fail("Expected exception"); - } catch (SpecBuilderException e) { - assertEquals(e.getMessage(), - "The show name: test/test is not in the proper format. 
" + - "Show names must be alpha numeric, no dashes or punctuation."); - } - } - - @Test - @Transactional - @Rollback(true) - public void getFacility() { - adminManager.getFacility("spi"); - adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1"); - } - - @Test - @Transactional - @Rollback(true) - public void createLimit() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - } - - @Test - @Transactional - @Rollback(true) - public void deleteLimit() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - LimitInterface limit = adminManager.findLimit(limitName); - adminManager.deleteLimit(limit); - } - - @Test - @Transactional - @Rollback(true) - public void findLimit() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - adminManager.findLimit(limitName); - } - - @Test - @Transactional - @Rollback(true) - public void getLimit() { - String limitName = "testlimit"; - String limitId = adminManager.createLimit(limitName, 42); - - adminManager.getLimit(limitId); - } - - @Test - @Transactional - @Rollback(true) - public void setLimitName() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - LimitInterface limit = adminManager.findLimit(limitName); - adminManager.setLimitName(limit, "newLimitName"); - } - - @Test - @Transactional - @Rollback(true) - public void setLimitMaxValue() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - LimitInterface limit = adminManager.findLimit(limitName); - adminManager.setLimitMaxValue(limit, 16); +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class AdminManagerTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + AdminManager adminManager; + + @Resource + FacilityDao facilityDao; + + @Resource + ShowDao showDao; + + private static final String TEST_ALLOC_NAME = "testAlloc"; + + @Test + @Transactional + @Rollback(true) + public void createAllocation() { + AllocationEntity a = new AllocationEntity(); + a.name = TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + } + + @Test + @Transactional + @Rollback(true) + public void deleteAllocation() { + AllocationEntity a = new AllocationEntity(); + a.name = facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + adminManager.deleteAllocation(a); + } + + @Test + @Transactional + @Rollback(true) + public void setDefaultAllocation() { + AllocationEntity a = adminManager.getDefaultAllocation(); + assertEquals(a.name, facilityDao.getDefaultFacility().getName() + ".unassigned"); + + a = new AllocationEntity(); + a.name = TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + adminManager.setDefaultAllocation(a); + + a = adminManager.getDefaultAllocation(); + assertEquals(a.name, facilityDao.getDefaultFacility().getName() + "." 
+ TEST_ALLOC_NAME); + } + + @Test + @Transactional + @Rollback(true) + public void createShow() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + ShowEntity result = showDao.findShowDetail(show.name); + assertEquals(result.name, show.name); + } + + @Test + @Transactional + @Rollback(true) + public void createInvalidShow() { + ShowEntity show = new ShowEntity(); + show.name = "test/test"; + try { + adminManager.createShow(show); + fail("Expected exception"); + } catch (SpecBuilderException e) { + assertEquals(e.getMessage(), "The show name: test/test is not in the proper format. " + + "Show names must be alpha numeric, no dashes or punctuation."); } + } + + @Test + @Transactional + @Rollback(true) + public void getFacility() { + adminManager.getFacility("spi"); + adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1"); + } + + @Test + @Transactional + @Rollback(true) + public void createLimit() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + } + + @Test + @Transactional + @Rollback(true) + public void deleteLimit() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + LimitInterface limit = adminManager.findLimit(limitName); + adminManager.deleteLimit(limit); + } + + @Test + @Transactional + @Rollback(true) + public void findLimit() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + adminManager.findLimit(limitName); + } + + @Test + @Transactional + @Rollback(true) + public void getLimit() { + String limitName = "testlimit"; + String limitId = adminManager.createLimit(limitName, 42); + + adminManager.getLimit(limitId); + } + + @Test + @Transactional + @Rollback(true) + public void setLimitName() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + LimitInterface limit = adminManager.findLimit(limitName); + adminManager.setLimitName(limit, "newLimitName"); + } + + @Test + @Transactional + @Rollback(true) + public void setLimitMaxValue() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + LimitInterface limit = adminManager.findLimit(limitName); + adminManager.setLimitMaxValue(limit, 16); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java index 1e894eb1c..4161ea409 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
+ * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -61,341 +57,325 @@ import static org.junit.Assert.fail; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class BookingManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - HostDao hostDao; - - @Resource - BookingDao bookingDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - ProcDao procDao; - - @Resource - BookingManager bookingManager; - - @Resource - Dispatcher localDispatcher; - - @Resource - RqdClient rqdClient; - - @Resource - Whiteboard whiteboard; - - - @Before - public void setTestMode() { - localDispatcher.setTestMode(true); - rqdClient.setTestMode(true); - } - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("general") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class BookingManagerTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + HostDao hostDao; + + @Resource + BookingDao bookingDao; + + @Resource + DispatcherDao dispatcherDao; + + @Resource + ProcDao procDao; - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } - - public JobDetail launchJob2() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - jobManager.setJobPaused(d, false); - return d; - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment l1 = new LocalHostAssignment(); - l1.setMaxCoreUnits(200); - l1.setMaxMemory(CueUtil.GB4); - l1.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, l1); - LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(h.getHostId(), - 
j.getJobId()); - - assertEquals(l1.id, l2.id); - assertEquals(l1.getFrameId(), l2.getFrameId()); - assertEquals(l1.getLayerId(), l2.getLayerId()); - assertEquals(l1.getJobId(), l2.getJobId()); - assertEquals(l1.getIdleCoreUnits(), l2.getIdleCoreUnits()); - assertEquals(l1.getMaxCoreUnits(), l2.getMaxCoreUnits()); - assertEquals(l1.getThreads(), l2.getThreads()); - assertEquals(l1.getIdleMemory(), l2.getIdleMemory()); - assertEquals(l1.getMaxMemory(), l2.getMaxMemory()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - } + @Resource + BookingManager bookingManager; - @Test - @Transactional - @Rollback(true) - public void hasLocalHostAssignment() { + @Resource + Dispatcher localDispatcher; + + @Resource + RqdClient rqdClient; + + @Resource + Whiteboard whiteboard; + + @Before + public void setTestMode() { + localDispatcher.setTestMode(true); + rqdClient.setTestMode(true); + } + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").addTags("general").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.setJobPaused(d, false); + return d; + } + + public JobDetail launchJob2() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + jobManager.setJobPaused(d, false); + return d; + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment l1 = new LocalHostAssignment(); + l1.setMaxCoreUnits(200); + l1.setMaxMemory(CueUtil.GB4); + l1.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, l1); + LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(h.getHostId(), j.getJobId()); + + assertEquals(l1.id, l2.id); + assertEquals(l1.getFrameId(), l2.getFrameId()); + assertEquals(l1.getLayerId(), l2.getLayerId()); + assertEquals(l1.getJobId(), l2.getJobId()); + assertEquals(l1.getIdleCoreUnits(), l2.getIdleCoreUnits()); + assertEquals(l1.getMaxCoreUnits(), l2.getMaxCoreUnits()); + assertEquals(l1.getThreads(), l2.getThreads()); + assertEquals(l1.getIdleMemory(), l2.getIdleMemory()); + assertEquals(l1.getMaxMemory(), l2.getMaxMemory()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + } + + @Test + @Transactional + @Rollback(true) + public void hasLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment l1 = new LocalHostAssignment(); + l1.setMaxCoreUnits(200); + l1.setMaxMemory(CueUtil.GB4); + l1.setThreads(2); + + assertFalse(bookingManager.hasLocalHostAssignment(h)); + + 
bookingManager.createLocalHostAssignment(h, j, l1); + assertTrue(bookingManager.hasLocalHostAssignment(h)); + + bookingManager.removeLocalHostAssignment(l1); + assertFalse(bookingManager.hasLocalHostAssignment(h)); + + assertFalse(bookingManager.hasActiveLocalFrames(h)); + } + + @Test + @Transactional + @Rollback(true) + public void hasActiveLocalFrames() { + // See LocalDispatcherTests + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignmentForJob() { + + DispatchHost h = createHost(); + JobDetail job = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, job, lja); + + assertNotNull(lja.getJobId()); + assertEquals(job.getJobId(), lja.getJobId()); + assertEquals(RenderPartitionType.JOB_PARTITION, lja.getType()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + whiteboard.getRenderPartition(lja); + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignmentForLayer() { + + DispatchHost h = createHost(); + JobDetail job = launchJob2(); + LayerInterface layer = jobManager.getLayers(job).get(0); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB8); + lja.setThreads(1); + + bookingManager.createLocalHostAssignment(h, layer, lja); + + assertNotNull(layer.getLayerId()); + assertEquals(layer.getLayerId(), lja.getLayerId()); + assertEquals(RenderPartitionType.LAYER_PARTITION, lja.getType()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + whiteboard.getRenderPartition(lja); + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignmentForFrame() { + + DispatchHost h = createHost(); + JobDetail job = launchJob2(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB8); + lja.setThreads(1); + + bookingManager.createLocalHostAssignment(h, frame, lja); + + assertNotNull(frame.getFrameId()); + assertEquals(frame.getFrameId(), lja.getFrameId()); + assertEquals(RenderPartitionType.FRAME_PARTITION, lja.getType()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + whiteboard.getRenderPartition(lja); + } - LocalHostAssignment l1 = new LocalHostAssignment(); - l1.setMaxCoreUnits(200); - l1.setMaxMemory(CueUtil.GB4); - l1.setThreads(2); - - assertFalse(bookingManager.hasLocalHostAssignment(h)); - - bookingManager.createLocalHostAssignment(h, j, l1); - assertTrue(bookingManager.hasLocalHostAssignment(h)); - - bookingManager.removeLocalHostAssignment(l1); - assertFalse(bookingManager.hasLocalHostAssignment(h)); - - assertFalse(bookingManager.hasActiveLocalFrames(h)); + @Test + @Transactional + @Rollback(true) + public void deactivateLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + bookingManager.deactivateLocalHostAssignment(lja); + } + + @Test + @Transactional + @Rollback(true) + public void setMaxResources() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new 
LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + + /* + * Lower the cores. + */ + bookingManager.setMaxResources(lja, 100, CueUtil.GB2, 1, CueUtil.MB256); + + LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(lja.id); + + assertEquals(100, l2.getMaxCoreUnits()); + assertEquals(CueUtil.GB2, l2.getMaxMemory()); + assertEquals(CueUtil.MB256, l2.getMaxGpuMemory()); + + /* + * Raise the values. + */ + bookingManager.setMaxResources(lja, 200, CueUtil.GB4, 1, CueUtil.MB512); + + l2 = bookingManager.getLocalHostAssignment(lja.id); + assertEquals(200, l2.getMaxCoreUnits()); + assertEquals(CueUtil.GB4, l2.getMaxMemory()); + assertEquals(CueUtil.MB512, l2.getMaxGpuMemory()); + } + + @Test + @Transactional + @Rollback(true) + public void setIllegalMaxResources() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + assertEquals(200, h.idleCores); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(CueUtil.MB512); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + + /* + * Raise the cores too high + */ + bookingManager.setMaxResources(lja, 800, CueUtil.GB2, 0, 0); + } + + @Test + @Transactional + @Rollback(true) + public void removeLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + /* + * Now remove the local host assignment. + */ + bookingManager.removeLocalHostAssignment(lja); + + /* + * Ensure its gone. 
+ */ + try { + hostDao.getHost(lja); + fail("Local host is still present but should be gone"); + } catch (EmptyResultDataAccessException e) { } - @Test - @Transactional - @Rollback(true) - public void hasActiveLocalFrames() { - // See LocalDispatcherTests - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignmentForJob() { - - DispatchHost h = createHost(); - JobDetail job = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, job, lja); - - assertNotNull(lja.getJobId()); - assertEquals(job.getJobId(), lja.getJobId()); - assertEquals(RenderPartitionType.JOB_PARTITION, lja.getType()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - whiteboard.getRenderPartition(lja); - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignmentForLayer() { - - DispatchHost h = createHost(); - JobDetail job = launchJob2(); - LayerInterface layer = jobManager.getLayers(job).get(0); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB8); - lja.setThreads(1); - - bookingManager.createLocalHostAssignment(h, layer, lja); - - assertNotNull(layer.getLayerId()); - assertEquals(layer.getLayerId(), lja.getLayerId()); - assertEquals(RenderPartitionType.LAYER_PARTITION, lja.getType()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - whiteboard.getRenderPartition(lja); - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignmentForFrame() { - - DispatchHost h = createHost(); - JobDetail job = launchJob2(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB8); - lja.setThreads(1); - - bookingManager.createLocalHostAssignment(h, frame, lja); - - assertNotNull(frame.getFrameId()); - assertEquals(frame.getFrameId(), lja.getFrameId()); - assertEquals(RenderPartitionType.FRAME_PARTITION, lja.getType()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - whiteboard.getRenderPartition(lja); - } - - @Test - @Transactional - @Rollback(true) - public void deactivateLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - bookingManager.deactivateLocalHostAssignment(lja); - } - - @Test - @Transactional - @Rollback(true) - public void setMaxResources() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - - /* - * Lower the cores. - */ - bookingManager.setMaxResources(lja, 100, CueUtil.GB2, 1, CueUtil.MB256); - - LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(lja.id); - - assertEquals(100, l2.getMaxCoreUnits()); - assertEquals(CueUtil.GB2, l2.getMaxMemory()); - assertEquals(CueUtil.MB256, l2.getMaxGpuMemory()); - - /* - * Raise the values. 
- */ - bookingManager.setMaxResources(lja, 200, CueUtil.GB4, 1, CueUtil.MB512); - - l2 = bookingManager.getLocalHostAssignment(lja.id); - assertEquals(200, l2.getMaxCoreUnits()); - assertEquals(CueUtil.GB4, l2.getMaxMemory()); - assertEquals(CueUtil.MB512, l2.getMaxGpuMemory()); - } - - @Test - @Transactional - @Rollback(true) - public void setIllegalMaxResources() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - assertEquals(200, h.idleCores); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(CueUtil.MB512); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - - /* - * Raise the cores too high - */ - bookingManager.setMaxResources(lja, 800, CueUtil.GB2, 0, 0); - } - - @Test - @Transactional - @Rollback(true) - public void removeLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - /* - * Now remove the local host assignment. - */ - bookingManager.removeLocalHostAssignment(lja); - - /* - * Ensure its gone. - */ - try { - hostDao.getHost(lja); - fail("Local host is still present but should be gone"); - } catch (EmptyResultDataAccessException e) {} - - /* - * Ensure the cores are back on the host. - */ - assertEquals(200, hostDao.getDispatchHost(h.getId()).idleCores); - } + /* + * Ensure the cores are back on the host. + */ + assertEquals(200, hostDao.getDispatchHost(h.getId()).idleCores); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java index 3a49239d2..46a667763 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -36,44 +32,41 @@ import com.imageworks.spcue.service.JobLauncher; import com.imageworks.spcue.service.JobManager; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class CommentManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - CommentManager commentManager; + @Resource + CommentManager commentManager; - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.setJobPaused(d, false); + return d; + } - @Test - @Transactional - @Rollback(true) - public void testJobComment() { + @Test + @Transactional + @Rollback(true) + public void testJobComment() { - JobDetail j = launchJob(); + JobDetail j = launchJob(); - CommentDetail c = new CommentDetail(); - c.message = "A test comment"; - c.subject = "A test subject"; - c.user = "Mr. Bigglesworth"; - c.timestamp = new java.sql.Timestamp(System.currentTimeMillis()); + CommentDetail c = new CommentDetail(); + c.message = "A test comment"; + c.subject = "A test subject"; + c.user = "Mr. Bigglesworth"; + c.timestamp = new java.sql.Timestamp(System.currentTimeMillis()); - commentManager.addComment(j, c); + commentManager.addComment(j, c); - - } + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java index 4dd2bd13f..4f3b70ec0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.service; import javax.annotation.Resource; @@ -41,62 +38,59 @@ import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DepartmentManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - DepartmentManager departmentManager; - - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - AdminManager adminManager; - - @Resource - PointDao pointDao; - - private static final String TEST_TI_TASK_NAME = "RINT"; - - @Test - @Transactional - @Rollback(true) - public void enableTiManaged() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - PointInterface rp = pointDao.getPointConfigDetail(show, dept); - - departmentManager.disableTiManaged(rp); - departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class DepartmentManagerTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + DepartmentManager departmentManager; + + @Resource + ShowDao showDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + AdminManager adminManager; + + @Resource + PointDao pointDao; + + private static final String TEST_TI_TASK_NAME = "RINT"; + + @Test + @Transactional + @Rollback(true) + public void enableTiManaged() { + ShowInterface show = showDao.findShowDetail("pipe"); + DepartmentInterface dept = departmentDao.getDefaultDepartment(); + PointInterface rp = pointDao.getPointConfigDetail(show, dept); + + departmentManager.disableTiManaged(rp); + departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); + } + + @Test + @Transactional + @Rollback(true) + public void updateTiManagedTasks() { + ShowInterface show = showDao.findShowDetail("pipe"); + DepartmentInterface dept = departmentDao.getDefaultDepartment(); + PointInterface rp; + + try { + rp = pointDao.getPointConfigDetail(show, dept); + } catch (org.springframework.dao.DataRetrievalFailureException e) { + pointDao.insertPointConf(show, dept); + rp = pointDao.getPointConfigDetail(show, dept); } + departmentManager.disableTiManaged(rp); + departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); - @Test - @Transactional - @Rollback(true) - public void updateTiManagedTasks() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - PointInterface rp; - - try { - rp = pointDao.getPointConfigDetail(show, dept); - } - catch (org.springframework.dao.DataRetrievalFailureException e) { - pointDao.insertPointConf(show, dept); - rp = pointDao.getPointConfigDetail(show,dept); - } - departmentManager.disableTiManaged(rp); - departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); + departmentManager.updateManagedTasks(rp); - departmentManager.updateManagedTasks(rp); + departmentManager.disableTiManaged(rp); - departmentManager.disableTiManaged(rp); - - } + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java index 0ff7c2ac1..595895b92 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java @@ 
-2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -54,193 +50,164 @@ @ContextConfiguration public class DependManagerChunkingTests extends TransactionalTest { - @Resource - DependDao dependDao; - - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/chunk_depend.xml")); + @Resource + DependDao dependDao; + + @Resource + DependManager dependManager; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Before + public void launchTestJobs() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/chunk_depend.xml")); + } + + private JobDetail getJob() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_chunked_depend"); + } + + private int getTotalDependSum(LayerInterface layer) { + return frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() + .mapToInt(frame -> frame.dependCount).sum(); + } + + private boolean hasDependFrames(LayerInterface layer) { + FrameSearchInterface search = frameSearchFactory.create(layer); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + private int getDependRecordCount(LayerInterface l) { + List activeDeps = + dependDao.getWhatThisDependsOn(l, DependTarget.ANY_TARGET); + int numChildDeps = + activeDeps.stream().mapToInt(dep -> dependDao.getChildDepends(dep).size()).sum(); + return numChildDeps + activeDeps.size(); + } + + /** + * Test a non-chunked layer depending on a large chunked layer. 
<1> <1> <2> <3> <4> <5> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyNonChunkOnLargeChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "large_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(100, getTotalDependSum(layer_a)); + // Optimized to LayerOnLayer + assertEquals(1, getDependRecordCount(layer_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + assertEquals(DependType.LAYER_ON_LAYER, lwd.type); + dependManager.satisfyDepend(lwd); } - private JobDetail getJob() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_chunked_depend"); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } + + /** + * Test a large chunked layer depending on a non-chunked layer. <1> <1> <2> <3> <4> <5> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLargeChunkOnNonChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "large_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + // Optimized to LayerOnLayer + assertEquals(1, getTotalDependSum(layer_a)); + assertEquals(1, getDependRecordCount(layer_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + assertEquals(DependType.LAYER_ON_LAYER, lwd.type); + dependManager.satisfyDepend(lwd); } - private int getTotalDependSum(LayerInterface layer) { - return frameDao.findFrameDetails(frameSearchFactory.create(layer)) - .stream() - .mapToInt(frame -> frame.dependCount) - .sum(); - } - - private boolean hasDependFrames(LayerInterface layer) { - FrameSearchInterface search = frameSearchFactory.create(layer); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } - - private int getDependRecordCount(LayerInterface l) { - List activeDeps = dependDao.getWhatThisDependsOn( - l, DependTarget.ANY_TARGET); - int numChildDeps = activeDeps.stream().mapToInt( - dep -> dependDao.getChildDepends(dep).size()).sum(); - return numChildDeps + activeDeps.size(); - } - - /** - * Test a non-chunked layer depending on a large chunked layer. - * <1> <1> - * <2> - * <3> - * <4> - * <5> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyNonChunkOnLargeChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "large_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(100, getTotalDependSum(layer_a)); - // Optimized to LayerOnLayer - assertEquals(1, getDependRecordCount(layer_a)); - - for (LightweightDependency lwd: dependDao.getWhatDependsOn(layer_b)) { - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - dependManager.satisfyDepend(lwd); - } - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } - - /** - * Test a large chunked layer depending on a non-chunked layer. 
- * <1> <1> - * <2> - * <3> - * <4> - * <5> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLargeChunkOnNonChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "large_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - // Optimized to LayerOnLayer - assertEquals(1, getTotalDependSum(layer_a)); - assertEquals(1, getDependRecordCount(layer_a)); - - for (LightweightDependency lwd: dependDao.getWhatDependsOn(layer_b)) { - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - dependManager.satisfyDepend(lwd); - } - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } - - /** - * Test a small chunk depending on a non chunk - * <1> <1> - * <2> - * <3> - * <4> <4> - * <5> - * <6> - * <7> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfySmallChunkOnNonChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "small_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(100, getTotalDependSum(layer_a)); - assertEquals(101, getDependRecordCount(layer_a)); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } - - /** - * Test a non chunk depending on a small chunk - * - * <1> <1> - * <2> - * <3> - * <4> - * <5> <5> - * <6> - * <7> - * <8> - * <9> - * <10> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyNonChunkOnSmallChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "small_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertEquals(101, getDependRecordCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(100, getTotalDependSum(layer_a)); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } + + /** + * Test a small chunk depending on a non chunk <1> <1> <2> <3> <4> <4> <5> <6> <7> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfySmallChunkOnNonChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "small_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(100, getTotalDependSum(layer_a)); + assertEquals(101, getDependRecordCount(layer_a)); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } + + /** + * Test a non chunk depending on a small chunk + * + * <1> <1> <2> <3> <4> <5> <5> <6> <7> <8> <9> <10> + */ + @Test + 
@Transactional + @Rollback(true) + public void testCreateAndSatisfyNonChunkOnSmallChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "small_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertEquals(101, getDependRecordCount(layer_a)); + assertTrue(hasDependFrames(layer_a)); + assertEquals(100, getTotalDependSum(layer_a)); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java index 956932e9b..1b7fd5c54 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -65,520 +61,500 @@ @ContextConfiguration public class DependManagerTests extends TransactionalTest { - @Resource - DependDao dependDao; - - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); - } - - public JobDetail getJobA() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); - } - - public JobDetail getJobB() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); - } - - private int getTotalDependCount(JobInterface job) { - return frameDao.findFrameDetails(frameSearchFactory.create(job)) - .stream() - .mapToInt(frame -> frame.dependCount) - .sum(); + @Resource + DependDao dependDao; + + @Resource + DependManager dependManager; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Before + public void launchTestJobs() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); + } + + public JobDetail getJobA() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); + } + + public JobDetail getJobB() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); + } + + private int getTotalDependCount(JobInterface job) { + return frameDao.findFrameDetails(frameSearchFactory.create(job)).stream() + .mapToInt(frame -> frame.dependCount).sum(); + } + + private boolean hasDependFrames(JobInterface job) { + FrameSearchInterface search = frameSearchFactory.create(job); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + private int getTotalDependCount(LayerInterface layer) { + return frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() + .mapToInt(frame -> frame.dependCount).sum(); + } + + private boolean hasDependFrames(LayerInterface layer) { + FrameSearchInterface search = frameSearchFactory.create(layer); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + private int getTotalDependCount(FrameInterface frame) { + return frameDao.findFrameDetails(frameSearchFactory.create(frame)).stream() + .mapToInt(frameDetail -> frameDetail.dependCount).sum(); + } + + private boolean hasDependFrames(FrameInterface frame) { + FrameSearchInterface search = frameSearchFactory.create(frame); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + @Test + @Transactional + @Rollback(true) + public void testUnsatisfyFrameOnFrame() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependManager.createDepend(depend); + + // Check to ensure depend was 
setup properly. + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + + LightweightDependency lwd = dependManager.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + + // Check to ensure it was satisfied properly. + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); + + // Now unsatisfy it. + dependManager.unsatisfyDepend(lwd); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + } + + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyJobOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + JobOnJob depend = new JobOnJob(job_a, job_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(20, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { + dependManager.satisfyDepend(lwd); } - private boolean hasDependFrames(JobInterface job) { - FrameSearchInterface search = frameSearchFactory.create(job); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } + assertFalse(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(0, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); + } - private int getTotalDependCount(LayerInterface layer) { - return frameDao.findFrameDetails(frameSearchFactory.create(layer)) - .stream() - .mapToInt(frame -> frame.dependCount) - .sum(); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyJobOnLayer() { - private boolean hasDependFrames(LayerInterface layer) { - FrameSearchInterface search = frameSearchFactory.create(layer); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - private int getTotalDependCount(FrameInterface frame) { - return frameDao.findFrameDetails(frameSearchFactory.create(frame)) - .stream() - .mapToInt(frameDetail -> frameDetail.dependCount) - .sum(); - } + JobOnLayer depend = new JobOnLayer(job_a, layer_b); + dependManager.createDepend(depend); - private boolean hasDependFrames(FrameInterface frame) { - FrameSearchInterface search = frameSearchFactory.create(frame); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } + assertTrue(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(20, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); - @Test - @Transactional - @Rollback(true) - public void testUnsatisfyFrameOnFrame() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - 
dependManager.createDepend(depend); - - // Check to ensure depend was setup properly. - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - - LightweightDependency lwd = dependManager.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - - // Check to ensure it was satisfied properly. - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - - // Now unsatisfy it. - dependManager.unsatisfyDepend(lwd); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + dependManager.satisfyDepend(lwd); } + assertFalse(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(0, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); + } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyJobOnJob() { + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyJobOnFrame() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - JobOnJob depend = new JobOnJob(job_a, job_b); - dependManager.createDepend(depend); + JobOnFrame depend = new JobOnFrame(job_a, frame_b); + dependManager.createDepend(depend); - assertTrue(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(20, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + assertTrue(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(20, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(job_b)) { - dependManager.satisfyDepend(lwd); - } - - assertFalse(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(0, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyJobOnLayer() { + assertFalse(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(0, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); + } - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnJob() { - JobOnLayer depend = new JobOnLayer(job_a, layer_b); - dependManager.createDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - assertTrue(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(20, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + LayerOnJob depend = new LayerOnJob(layer_a, job_b); + dependManager.createDepend(depend); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(layer_b)) { - 
dependManager.satisfyDepend(lwd); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(0, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyJobOnFrame() { + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnLayer() { - JobOnFrame depend = new JobOnFrame(job_a, frame_b); - dependManager.createDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - assertTrue(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(20, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + dependManager.createDepend(depend); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(0, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnJob() { + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnFrame() { - LayerOnJob depend = new LayerOnJob(layer_a, job_b); - dependManager.createDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + LayerOnFrame depend = new LayerOnFrame(layer_a, frame_b); + dependManager.createDepend(depend); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(job_b)) { - dependManager.satisfyDepend(lwd); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnLayer() { + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, 
"pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnSimFrame() { - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependManager.createDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + LayerOnSimFrame depend = new LayerOnSimFrame(layer_a, frame_b); + dependManager.createDepend(depend); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(layer_b)) { - dependManager.satisfyDepend(lwd); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnFrame() { + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameOnJob() { - LayerOnFrame depend = new LayerOnFrame(layer_a, frame_b); - dependManager.createDepend(depend); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + FrameOnJob depend = new FrameOnJob(frame_a, job_b); + dependManager.createDepend(depend); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnSimFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - - LayerOnSimFrame depend = new LayerOnSimFrame(layer_a, frame_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); - - for (LightweightDependency lwd: dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - } + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); + } + + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameOnLayer() { + + 
JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + + FrameOnLayer depend = new FrameOnLayer(frame_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + dependManager.satisfyDepend(lwd); + } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); + } + + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameOnJob() { + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); + } - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameByFrame() { - FrameOnJob depend = new FrameOnJob(frame_a, job_b); - dependManager.createDepend(depend); + /** + * A compound depend, like FrameByFrame or PreviousFrame cannot be satisfied by using + * dependDao.getWhatDependsOn. You must have a reference to the actual dependency. 
+ */ - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(job_b)) { - dependManager.satisfyDepend(lwd); - } + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - - FrameOnLayer depend = new FrameOnLayer(frame_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - - for (LightweightDependency lwd: dependDao.getWhatDependsOn(layer_b)) { - dependManager.satisfyDepend(lwd); - } - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - } + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - - for (LightweightDependency lwd: dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - } - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameByFrame() { + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - /** - * A compound depend, like FrameByFrame or PreviousFrame cannot - * be satisfied by using dependDao.getWhatDependsOn. You must - * have a reference to the actual dependency. 
- */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnLayerAnyFrame() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + depend.setAnyFrame(true); + dependManager.createDepend(depend); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); + FrameInterface frame_b = frameDao.findFrame(layer_b, 5); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnLayerAnyFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - depend.setAnyFrame(true); - dependManager.createDepend(depend); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyPreviousFrame() { - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_b = frameDao.findFrame(layer_b, 5); + PreviousFrame depend = new PreviousFrame(layer_a, layer_b); + dependManager.createDepend(depend); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - } - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(9, getTotalDependCount(layer_a)); - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyPreviousFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - PreviousFrame depend = new PreviousFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(9, getTotalDependCount(layer_a)); - - FrameInterface frame_b = frameDao.findFrame(layer_b, 9); - for (LightweightDependency lwd: dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - for (FrameDetail f: frameDao.findFrameDetails( - frameSearchFactory.create(layer_a))) { - logger.info(f.getName() + " " + f.state.toString()); - } - } - - assertTrue(hasDependFrames(layer_a)); - assertEquals(8, getTotalDependCount(layer_a)); + FrameInterface frame_b = frameDao.findFrame(layer_b, 9); 
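+    // Satisfying the depend on frame 9 releases only the single frame waiting on it, so
+    // the layer's total depend count drops from 9 to 8 rather than to 0.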
+ for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + for (FrameDetail f : frameDao.findFrameDetails(frameSearchFactory.create(layer_a))) { + logger.info(f.getName() + " " + f.state.toString()); + } } - /** - * In this test, some of the dependOnFrames are already - * completed. The FrameOnFrame depends - * that get setup on those frames should be inactive, - * and the depend count should not be updated the corresponding - * dependErFrames. - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameByFrameParital() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - FrameSearchInterface search = frameSearchFactory.create(layer_b); - search.filterByFrameSet("1-3"); - frameDao.findFrames(search) - .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - /** Check the active state **/ - assertTrue( - dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 1)) - .stream() - .noneMatch(dep -> dep.active)); - assertTrue( - dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 2)) - .stream() - .noneMatch(dep -> dep.active)); - assertTrue( - dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 3)) - .stream() - .noneMatch(dep -> dep.active)); - assertTrue( - dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 4)) - .stream() - .allMatch(dep -> dep.active)); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(7, getTotalDependCount(layer_a)); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(8, getTotalDependCount(layer_a)); + } + + /** + * In this test, some of the dependOnFrames are already completed. The FrameOnFrame depends that + * get set up on those frames should be inactive, and the depend count should not be updated for + * the corresponding dependErFrames. 
+ */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameByFrameParital() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + FrameSearchInterface search = frameSearchFactory.create(layer_b); + search.filterByFrameSet("1-3"); + frameDao.findFrames(search) + .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + /** Check the active state **/ + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 1)).stream() + .noneMatch(dep -> dep.active)); + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 2)).stream() + .noneMatch(dep -> dep.active)); + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 3)).stream() + .noneMatch(dep -> dep.active)); + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 4)).stream() + .allMatch(dep -> dep.active)); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(7, getTotalDependCount(layer_a)); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java index 6e34195d4..17df2e023 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.service; import java.io.File; @@ -43,83 +40,79 @@ import com.imageworks.spcue.service.JobLauncher; import com.imageworks.spcue.service.JobSpec; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class EmailSupportTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - EmailSupport emailSupport; + @Resource + EmailSupport emailSupport; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - @Resource - DependDao dependDao; + @Resource + DependDao dependDao; - @Resource - LayerDao layerDao; + @Resource + LayerDao layerDao; - @Resource - DependManager dependManager; + @Resource + DependManager dependManager; - @Resource - FrameSearchFactory frameSearchFactory; + @Resource + FrameSearchFactory frameSearchFactory; - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } - @Test - @Transactional - @Rollback(true) - public void testJobCompleteEmailSuccess() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); + @Test + @Transactional + @Rollback(true) + public void testJobCompleteEmailSuccess() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); - JobDetail job = spec.getJobs().get(0).detail; + JobDetail job = spec.getJobs().get(0).detail; - jobDao.updateEmail(job, System.getProperty("user.name")); + jobDao.updateEmail(job, System.getProperty("user.name")); - // Satisfy all dependencies, this will allow us to mark frames as complete. - layerDao.getLayers(job) - .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) - .forEach(dep -> dependManager.satisfyDepend(dep))); + // Satisfy all dependencies, this will allow us to mark frames as complete. 
+ layerDao.getLayers(job) + .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) + .forEach(dep -> dependManager.satisfyDepend(dep))); - frameDao.findFrames(frameSearchFactory.create(job)).forEach( - frame -> frameDao.updateFrameState( - frameDao.getFrame(frame.getFrameId()), FrameState.SUCCEEDED)); + frameDao.findFrames(frameSearchFactory.create(job)).forEach(frame -> frameDao + .updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.SUCCEEDED)); - emailSupport.sendShutdownEmail(job); - } + emailSupport.sendShutdownEmail(job); + } - @Test - @Transactional - @Rollback(true) - public void testJobCompleteEmailFail() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); + @Test + @Transactional + @Rollback(true) + public void testJobCompleteEmailFail() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); - JobDetail job = spec.getJobs().get(0).detail; + JobDetail job = spec.getJobs().get(0).detail; - jobDao.updateEmail(job, System.getProperty("user.name")); + jobDao.updateEmail(job, System.getProperty("user.name")); - layerDao.getLayers(job) - .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) - .forEach(dep -> dependManager.satisfyDepend(dep))); + layerDao.getLayers(job) + .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) + .forEach(dep -> dependManager.satisfyDepend(dep))); - frameDao.findFrames(frameSearchFactory.create(job)).forEach( - frame -> frameDao.updateFrameState( - frameDao.getFrame(frame.getFrameId()), FrameState.DEAD)); + frameDao.findFrames(frameSearchFactory.create(job)).forEach( + frame -> frameDao.updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.DEAD)); - emailSupport.sendShutdownEmail(job); - } + emailSupport.sendShutdownEmail(job); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java index 13da1c7de..5443e9171 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.service; import java.io.File; @@ -63,366 +60,335 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FilterManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; + @Resource + FilterDao filterDao; - @Resource - DepartmentDao departmentDao; + @Resource + ShowDao showDao; - @Resource - GroupManager groupManager; + @Resource + DepartmentDao departmentDao; - @Resource - JobManager jobManager; + @Resource + GroupManager groupManager; - @Resource - FilterManager filterManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + FilterManager filterManager; - @Resource - JobDao jobDao; + @Resource + JobLauncher jobLauncher; - @Resource - LayerDao layerDao; + @Resource + JobDao jobDao; - @Resource - GroupDao groupDao; + @Resource + LayerDao layerDao; - @Resource - Whiteboard whiteboard; + @Resource + GroupDao groupDao; - private static String FILTER_NAME = "test_filter"; + @Resource + Whiteboard whiteboard; - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } + private static String FILTER_NAME = "test_filter"; + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } + public FilterEntity buildFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = "00000000-0000-0000-0000-000000000000"; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testShotEndsWith() { + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + MatcherEntity m = new MatcherEntity(); + m.filterId = f.getFilterId(); + m.name = "match end of shot"; + m.subject = MatchSubject.SHOT; + m.type = MatchType.ENDS_WITH; + m.value = ".cue"; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + assertTrue(filterManager.isMatch(m, job)); + m.value = "layout"; + assertFalse(filterManager.isMatch(m, job)); + } + + @Test + @Transactional + @Rollback(true) + public void testLayerNameContains() { + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + MatcherEntity m = new MatcherEntity(); + m.filterId = f.getFilterId(); + m.name = "layer name contains"; + m.subject = MatchSubject.LAYER_NAME; + m.type = MatchType.CONTAINS; + m.value = "pass_1"; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + assertTrue(filterManager.isMatch(m, job)); + m.value = "pass_11111"; + assertFalse(filterManager.isMatch(m, job)); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionPauseJob() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.PAUSE_JOB; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.BOOLEAN_TYPE; + a1.booleanValue = true; 
+ + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertTrue(jobDao.getJobDetail(job.getJobId()).isPaused); + + a1.booleanValue = false; + filterManager.applyAction(a1, job); + assertFalse(jobDao.getJobDetail(job.getJobId()).isPaused); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetMemoryOptimizer() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_MEMORY_OPTIMIZER; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.BOOLEAN_TYPE; + a1.booleanValue = false; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertTrue(whiteboard.getLayers(job).getLayersList().stream() + .noneMatch(Layer::getMemoryOptimizerEnabled)); + + a1.booleanValue = true; + filterManager.applyAction(a1, job); + assertTrue(whiteboard.getLayers(job).getLayersList().stream() + .allMatch(Layer::getMemoryOptimizerEnabled)); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetMinCores() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_JOB_MIN_CORES; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.FLOAT_TYPE; + a1.floatValue = 10f; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).minCoreUnits, 0); + + a1.floatValue = 100f; + filterManager.applyAction(a1, job); + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).minCoreUnits, 0); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetMaxCores() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_JOB_MAX_CORES; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.FLOAT_TYPE; + a1.floatValue = 10f; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).maxCoreUnits, 0); + + a1.intValue = 100; + filterManager.applyAction(a1, job); + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).maxCoreUnits, 0); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetPriority() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_JOB_PRIORITY; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.INTEGER_TYPE; + a1.intValue = 100; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(a1.intValue, jobDao.getJobDetail(job.getJobId()).priority); + + a1.intValue = 1001; + 
filterManager.applyAction(a1, job); + assertEquals(a1.intValue, jobDao.getJobDetail(job.getJobId()).priority); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionMoveToGroup() { + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + GroupDetail g = new GroupDetail(); + g.name = "Testest"; + g.showId = job.getShowId(); + g.deptId = departmentDao.getDefaultDepartment().getId(); + + groupManager.createGroup(g, groupManager.getRootGroupDetail(job)); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.MOVE_JOB_TO_GROUP; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.GROUP_TYPE; + a1.groupValue = g.id; + + filterManager.applyAction(a1, job); + + assertEquals(g.id, jobDao.getJobDetail(job.getJobId()).groupId); + + assertEquals(groupDao.getGroupDetail(a1.groupValue).deptId, + jobDao.getJobDetail(job.getJobId()).deptId); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetRenderCoreLayers() { - public FilterEntity buildFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_ALL_RENDER_LAYER_MIN_CORES; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.FLOAT_TYPE; + a1.floatValue = 40f; - return filter; - } + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + layerDao.findLayerDetail(job, "pass_1").minimumCores, 0); - @Test - @Transactional - @Rollback(true) - public void testShotEndsWith() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - MatcherEntity m = new MatcherEntity(); - m.filterId = f.getFilterId(); - m.name = "match end of shot"; - m.subject = MatchSubject.SHOT; - m.type = MatchType.ENDS_WITH; - m.value = ".cue"; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - assertTrue(filterManager.isMatch(m, job)); - m.value = "layout"; - assertFalse(filterManager.isMatch(m, job)); - } - - @Test - @Transactional - @Rollback(true) - public void testLayerNameContains() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - MatcherEntity m = new MatcherEntity(); - m.filterId = f.getFilterId(); - m.name = "layer name contains"; - m.subject = MatchSubject.LAYER_NAME; - m.type = MatchType.CONTAINS; - m.value = "pass_1"; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - assertTrue(filterManager.isMatch(m, job)); - m.value = "pass_11111"; - assertFalse(filterManager.isMatch(m, job)); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionPauseJob() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.BOOLEAN_TYPE; - 
a1.booleanValue = true; - - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertTrue(jobDao.getJobDetail(job.getJobId()).isPaused); - - a1.booleanValue = false; - filterManager.applyAction(a1, job); - assertFalse(jobDao.getJobDetail(job.getJobId()).isPaused); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetMemoryOptimizer() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_MEMORY_OPTIMIZER; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.BOOLEAN_TYPE; - a1.booleanValue = false; - - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertTrue( - whiteboard.getLayers(job) - .getLayersList() - .stream() - .noneMatch(Layer::getMemoryOptimizerEnabled)); - - a1.booleanValue = true; - filterManager.applyAction(a1, job); - assertTrue( - whiteboard.getLayers(job) - .getLayersList() - .stream() - .allMatch(Layer::getMemoryOptimizerEnabled)); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetMinCores() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_JOB_MIN_CORES; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.FLOAT_TYPE; - a1.floatValue = 10f; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals( - Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).minCoreUnits, - 0); - - a1.floatValue = 100f; - filterManager.applyAction(a1, job); - assertEquals( - Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).minCoreUnits, - 0); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetMaxCores() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_JOB_MAX_CORES; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.FLOAT_TYPE; - a1.floatValue = 10f; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals( - Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).maxCoreUnits, - 0); - - a1.intValue = 100; - filterManager.applyAction(a1, job); - assertEquals( - Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).maxCoreUnits, - 0); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetPriority() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_JOB_PRIORITY; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.INTEGER_TYPE; - a1.intValue = 100; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals( - a1.intValue, - 
jobDao.getJobDetail(job.getJobId()).priority); - - a1.intValue = 1001; - filterManager.applyAction(a1, job); - assertEquals( - a1.intValue, - jobDao.getJobDetail(job.getJobId()).priority); - } - - - @Test - @Transactional - @Rollback(true) - public void testApplyActionMoveToGroup() { - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - GroupDetail g = new GroupDetail(); - g.name = "Testest"; - g.showId = job.getShowId(); - g.deptId = departmentDao.getDefaultDepartment().getId(); - - groupManager.createGroup(g, groupManager.getRootGroupDetail(job)); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.MOVE_JOB_TO_GROUP; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.GROUP_TYPE; - a1.groupValue = g.id; - - - filterManager.applyAction(a1, job); - - assertEquals(g.id, jobDao.getJobDetail(job.getJobId()).groupId); - - assertEquals( - groupDao.getGroupDetail(a1.groupValue).deptId, - jobDao.getJobDetail(job.getJobId()).deptId); - } - - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetRenderCoreLayers() { - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_MIN_CORES; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.FLOAT_TYPE; - a1.floatValue = 40f; - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); + assertEquals(Convert.coresToCoreUnits(.25f), + layerDao.findLayerDetail(job, "pass_1_preprocess").minimumCores, 0); + } - assertEquals( - Convert.coresToCoreUnits(a1.floatValue), - layerDao.findLayerDetail(job, "pass_1").minimumCores, - 0); - - assertEquals( - Convert.coresToCoreUnits(.25f), - layerDao.findLayerDetail(job, "pass_1_preprocess").minimumCores, - 0); - } + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetRenderLayerMemory() { - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetRenderLayerMemory() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_ALL_RENDER_LAYER_MEMORY; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.INTEGER_TYPE; + a1.intValue = CueUtil.GB8; - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_MEMORY; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.INTEGER_TYPE; - a1.intValue = CueUtil.GB8; + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); + assertEquals(CueUtil.GB8, layerDao.findLayerDetail(job, "pass_1").minimumMemory); + } - assertEquals( - CueUtil.GB8, - layerDao.findLayerDetail(job, "pass_1").minimumMemory); - } + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetAllRenderLayerTags() { - @Test - @Transactional - @Rollback(true) - public void 
testApplyActionSetAllRenderLayerTags() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_ALL_RENDER_LAYER_TAGS; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.STRING_TYPE; + a1.stringValue = "blah"; - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_TAGS; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.STRING_TYPE; - a1.stringValue = "blah"; + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertThat(layerDao.findLayerDetail(job, "pass_1").tags, contains("blah")); - assertThat(layerDao.findLayerDetail(job, "pass_1_preprocess").tags, contains("general")); - } + assertThat(layerDao.findLayerDetail(job, "pass_1").tags, contains("blah")); + assertThat(layerDao.findLayerDetail(job, "pass_1_preprocess").tags, contains("general")); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java index f145d32e8..d99e0cc98 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -47,71 +43,70 @@ import static org.junit.Assert.assertEquals; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class GroupManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - GroupManager groupManager; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - GroupDao groupDao; - - @Resource - JobDao jobDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - ShowDao showDao; - - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } - - @Test - @Transactional - @Rollback(true) - public void createGroup() { - ShowInterface pipe = showDao.findShowDetail("pipe"); - GroupDetail group = new GroupDetail(); - group.name = "testGroup"; - group.showId = pipe.getId(); - group.parentId = groupDao.getRootGroupDetail(pipe).getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupManager.createGroup(group, null); - } - - @Test - @Transactional - @Rollback(true) - public void setGroupDepartment() { - ShowInterface pipe = showDao.findShowDetail("pipe"); - GroupDetail group = groupDao.getRootGroupDetail(pipe); - - // Launch a test job - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - JobInterface job = jobManager.getJob(spec.getJobs().get(0).detail.id); - - // Set the group's department property to Lighting, it should - // currently be Unknown - DepartmentInterface dept = departmentDao.findDepartment("Lighting"); - jobDao.updateParent(job, group); - - // Update the group to the Lighting department - groupManager.setGroupDepartment(group, dept); - - // Now check if the job we launched was also updated to the lighting department - assertEquals(dept.getDepartmentId(), jobDao.getJobDetail(job.getJobId()).deptId); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class GroupManagerTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + GroupManager groupManager; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + GroupDao groupDao; + + @Resource + JobDao jobDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + ShowDao showDao; + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + + @Test + @Transactional + @Rollback(true) + public void createGroup() { + ShowInterface pipe = showDao.findShowDetail("pipe"); + GroupDetail group = new GroupDetail(); + group.name = "testGroup"; + group.showId = pipe.getId(); + group.parentId = groupDao.getRootGroupDetail(pipe).getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupManager.createGroup(group, null); + } + + @Test + @Transactional + @Rollback(true) + public void setGroupDepartment() { + ShowInterface pipe = showDao.findShowDetail("pipe"); + GroupDetail group = groupDao.getRootGroupDetail(pipe); + + // Launch a test job + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + JobInterface job = jobManager.getJob(spec.getJobs().get(0).detail.id); + + // Set the group's department property to Lighting, it should + // currently be Unknown + DepartmentInterface dept = departmentDao.findDepartment("Lighting"); + jobDao.updateParent(job, group); + + // Update the group to the Lighting department 
+ groupManager.setGroupDepartment(group, dept); + + // Now check if the job we launched was also updated to the lighting department + assertEquals(dept.getDepartmentId(), jobDao.getJobDetail(job.getJobId()).deptId); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java index 29970441d..f6673b106 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -61,149 +57,130 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class HostManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - HostDao hostDao; - - @Resource - FacilityDao facilityDao; - - @Resource - FrameDao frameDao; - - @Resource - ProcDao procDao; - - @Resource - AllocationDao allocationDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - OwnerManager ownerManager; - - private static final String HOST_NAME = "alpha1"; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName(HOST_NAME) - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(15290520) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap(2076) - .setNimbyEnabled(true) - .setNumProcs(2) - .setCoresPerProc(400) - .setState(HardwareState.UP) - .setFacility("spi") - .addAllTags(ImmutableList.of("linux", "64bit")) - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - hostDao.insertRenderHost(host, - adminManager.findAllocationDetail("spi", "general"), - false); - - return hostDao.findDispatchHost(HOST_NAME); - } - - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } - - /** - * Test that moves a host from one allocation to another. 
- */ - @Test - @Transactional - @Rollback(true) - public void setAllocation() { - HostInterface h = createHost(); - hostManager.setAllocation(h, - allocationDao.findAllocationEntity("spi", "general")); - } - - /** - * This test ensures you can't transfer a host that has a proc - * assigned to a show without a subscription to the destination - * allocation. - */ - @Test(expected=EntityModificationError.class) - @Transactional - @Rollback(true) - public void setBadAllocation() { - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/facility.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - DispatchHost h = createHost(); - - AllocationEntity ad = - allocationDao.findAllocationEntity("spi", "desktop"); - - VirtualProc proc = VirtualProc.build(h, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - AllocationEntity ad2 = allocationDao.findAllocationEntity("spi", "desktop"); - hostManager.setAllocation(h, ad2); - } - - @Test - @Transactional - @Rollback(true) - public void testGetPrefferedShow() { - DispatchHost h = createHost(); - - ShowInterface pshow = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("spongebob", pshow); - - ownerManager.takeOwnership(o, h); - - ShowInterface show = hostManager.getPreferredShow(h); - assertEquals(pshow, show); - } - - @Test - @Transactional - @Rollback(true) - public void testisPrefferedShow() { - DispatchHost h = createHost(); - - assertFalse(hostManager.isPreferShow(h)); - - ShowInterface pshow = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("spongebob", pshow); - - ownerManager.takeOwnership(o, h); - - ShowInterface show = hostManager.getPreferredShow(h); - assertEquals(pshow, show); - - assertTrue(hostManager.isPreferShow(h)); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class HostManagerTests extends AbstractTransactionalJUnit4SpringContextTests { -} + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + HostDao hostDao; + + @Resource + FacilityDao facilityDao; + + @Resource + FrameDao frameDao; + + @Resource + ProcDao procDao; + + @Resource + AllocationDao allocationDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + OwnerManager ownerManager; + + private static final String HOST_NAME = "alpha1"; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName(HOST_NAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap(2076) + .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(400).setState(HardwareState.UP) + .setFacility("spi").addAllTags(ImmutableList.of("linux", "64bit")) + .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); + + hostDao.insertRenderHost(host, adminManager.findAllocationDetail("spi", "general"), false); + + return hostDao.findDispatchHost(HOST_NAME); + } + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + /** + * Test that moves a host from one allocation to another. 
+ */ + @Test + @Transactional + @Rollback(true) + public void setAllocation() { + HostInterface h = createHost(); + hostManager.setAllocation(h, allocationDao.findAllocationEntity("spi", "general")); + } + + /** + * This test ensures you can't transfer a host that has a proc assigned to a show without a + * subscription to the destination allocation. + */ + @Test(expected = EntityModificationError.class) + @Transactional + @Rollback(true) + public void setBadAllocation() { + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/facility.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + DispatchHost h = createHost(); + + AllocationEntity ad = allocationDao.findAllocationEntity("spi", "desktop"); + + VirtualProc proc = VirtualProc.build(h, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + AllocationEntity ad2 = allocationDao.findAllocationEntity("spi", "desktop"); + hostManager.setAllocation(h, ad2); + } + + @Test + @Transactional + @Rollback(true) + public void testGetPrefferedShow() { + DispatchHost h = createHost(); + + ShowInterface pshow = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("spongebob", pshow); + + ownerManager.takeOwnership(o, h); + + ShowInterface show = hostManager.getPreferredShow(h); + assertEquals(pshow, show); + } + + @Test + @Transactional + @Rollback(true) + public void testisPrefferedShow() { + DispatchHost h = createHost(); + + assertFalse(hostManager.isPreferShow(h)); + + ShowInterface pshow = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("spongebob", pshow); + + ownerManager.takeOwnership(o, h); + + ShowInterface show = hostManager.getPreferredShow(h); + assertEquals(pshow, show); + + assertTrue(hostManager.isPreferShow(h)); + } + +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java index b7a4dfbf8..e6a9601dd 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.service; import java.io.File; @@ -72,457 +69,432 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class JobManagerTests extends AbstractTransactionalJUnit4SpringContextTests { +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class JobManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - JobManagerSupport jobManagerSupport; + @Resource + JobManagerSupport jobManagerSupport; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - LayerDao layerDao; + @Resource + LayerDao layerDao; - @Resource - DispatcherDao dispatcherDao; + @Resource + DispatcherDao dispatcherDao; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - FrameSearchFactory frameSearchFactory; + @Resource + FrameSearchFactory frameSearchFactory; - private static final String JOB1 = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String JOB2 = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - private static final String JOB3 = "pipe-dev.cue-testuser_shell_v1"; + private static final String JOB1 = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String JOB2 = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String JOB3 = "pipe-dev.cue-testuser_shell_v1"; - public JobDetail getJob1() { - return jobManager.findJobDetail(JOB1); - } + public JobDetail getJob1() { + return jobManager.findJobDetail(JOB1); + } - public JobDetail getJob2() { - return jobManager.findJobDetail(JOB2); - } + public JobDetail getJob2() { + return jobManager.findJobDetail(JOB2); + } - public JobDetail getJob3() { - return jobManager.findJobDetail(JOB3); - } + public JobDetail getJob3() { + return jobManager.findJobDetail(JOB3); + } - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("general") - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } + public DispatchHost createHost() { - @BeforeTransaction - public void init() { - jobLauncher.testMode = true; - - for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { - try { - JobInterface job = jobDao.findJob(jobName); - jobDao.updateJobFinished(job); - jobDao.deleteJob(job); - } catch (EmptyResultDataAccessException e) { - // Job doesn't exist, ignore. 
- } - } - - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - jobLauncher.launch(spec); - - spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { - jobDao.updatePaused(jobDao.findJob(jobName), true); - } - } + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").addTags("general").build(); - @AfterTransaction - public void destroy() { - for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { - JobInterface job = jobDao.findJob(jobName); - jobDao.updateJobFinished(job); - jobDao.deleteJob(job); - } - } + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - @Test - @Transactional - @Rollback(true) - public void testLaunchAutoEatJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/autoeat.xml")); - jobLauncher.launch(spec); + return dh; + } - assertTrue(jobDao.getDispatchJob(spec.getJobs().get(0).detail.id).autoEat); - } + @BeforeTransaction + public void init() { + jobLauncher.testMode = true; - @Test - @Transactional - @Rollback(true) - public void testLaunchJob() { - LayerDetail job1Layer = layerDao.findLayerDetail(jobDao.findJob(JOB1), "pass_1"); - assertEquals(CueUtil.GB2, job1Layer.minimumMemory); - assertEquals(100, job1Layer.minimumCores); - - // check some job_stats values - assertEquals(20, getJob1().totalFrames); - assertEquals(10, jobDao.getFrameStateTotals(jobDao.findJob(JOB2)).waiting); - assertEquals(0, jobDao.getFrameStateTotals(jobDao.findJob(JOB1)).depend); - - FrameStateTotals job3FrameStates = jobDao.getFrameStateTotals(jobDao.findJob(JOB3)); - assertEquals(1, job3FrameStates.waiting); - assertEquals(10, job3FrameStates.depend); + for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { + try { + JobInterface job = jobDao.findJob(jobName); + jobDao.updateJobFinished(job); + jobDao.deleteJob(job); + } catch (EmptyResultDataAccessException e) { + // Job doesn't exist, ignore. 
+ } } - @Test - @Transactional - @Rollback(true) - public void testShutdownRelaunchJob() { - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - logger.info("job detail: " + job2.getName()); - logger.info("job state " + job2.state.toString()); - - jobManager.shutdownJob(job1); - jobManager.shutdownJob(job2); + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + jobLauncher.launch(spec); - assertEquals(JobState.FINISHED, jobDao.getJobDetail(job1.id).state); + spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - - getJob1(); + for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { + jobDao.updatePaused(jobDao.findJob(jobName), true); } - - @Test - @Transactional - @Rollback(true) - public void testShutdownJob() { - JobDetail job = getJob1(); - logger.info("job detail: " + job.getName()); - logger.info("job state " + job.state.toString()); - - jobManager.shutdownJob(getJob1()); - - assertEquals(JobState.FINISHED, jobDao.getJobDetail(job.id).state); + } + + @AfterTransaction + public void destroy() { + for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { + JobInterface job = jobDao.findJob(jobName); + jobDao.updateJobFinished(job); + jobDao.deleteJob(job); } - - @Test - @Transactional - @Rollback(true) - public void testAutoNameJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_autoname.xml")); - jobLauncher.launch(spec); - - assertEquals(JobState.PENDING, jobDao.findJobDetail(spec.conformJobName("autoname")).state); + } + + @Test + @Transactional + @Rollback(true) + public void testLaunchAutoEatJob() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/autoeat.xml")); + jobLauncher.launch(spec); + + assertTrue(jobDao.getDispatchJob(spec.getJobs().get(0).detail.id).autoEat); + } + + @Test + @Transactional + @Rollback(true) + public void testLaunchJob() { + LayerDetail job1Layer = layerDao.findLayerDetail(jobDao.findJob(JOB1), "pass_1"); + assertEquals(CueUtil.GB2, job1Layer.minimumMemory); + assertEquals(100, job1Layer.minimumCores); + + // check some job_stats values + assertEquals(20, getJob1().totalFrames); + assertEquals(10, jobDao.getFrameStateTotals(jobDao.findJob(JOB2)).waiting); + assertEquals(0, jobDao.getFrameStateTotals(jobDao.findJob(JOB1)).depend); + + FrameStateTotals job3FrameStates = jobDao.getFrameStateTotals(jobDao.findJob(JOB3)); + assertEquals(1, job3FrameStates.waiting); + assertEquals(10, job3FrameStates.depend); + } + + @Test + @Transactional + @Rollback(true) + public void testShutdownRelaunchJob() { + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + logger.info("job detail: " + job2.getName()); + logger.info("job state " + job2.state.toString()); + + jobManager.shutdownJob(job1); + jobManager.shutdownJob(job2); + + assertEquals(JobState.FINISHED, jobDao.getJobDetail(job1.id).state); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + + getJob1(); + } + + @Test + @Transactional + @Rollback(true) + public void testShutdownJob() { + JobDetail job = getJob1(); + logger.info("job detail: " + job.getName()); + logger.info("job state " + job.state.toString()); + + jobManager.shutdownJob(getJob1()); + + assertEquals(JobState.FINISHED, jobDao.getJobDetail(job.id).state); + } + + @Test + @Transactional + @Rollback(true) + public 
void testAutoNameJob() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_autoname.xml")); + jobLauncher.launch(spec); + + assertEquals(JobState.PENDING, jobDao.findJobDetail(spec.conformJobName("autoname")).state); + } + + @Test + @Transactional + @Rollback(true) + public void testShowAlias() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/show_alias.xml")); + jobLauncher.launch(spec); + } + + @Test + @Transactional + @Rollback(true) + public void testMisNamedJob() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); + assertEquals("pipe-dev.cue-testuser_pipe_dev.cue_testuser_blah_blah_v1", + spec.getJobs().get(0).detail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testMisNamedJob2() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); + + assertEquals(spec.conformJobName("blah_____blah_v1"), "pipe-dev.cue-testuser_blah_blah_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void testNonExistentShow() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_nonexistent_show.xml")); + try { + jobLauncher.launch(spec); + fail("Expected exception"); + } catch (EntityCreationError e) { + assertEquals(e.getMessage(), + "The nonexistentshow does not exist. Please contact administrator of your " + + "OpenCue deployment to have this show created."); } - - - @Test - @Transactional - @Rollback(true) - public void testShowAlias() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/show_alias.xml")); - jobLauncher.launch(spec); + } + + @Test + @Transactional + @Rollback(true) + public void testPostFrameJobLaunch() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); + jobLauncher.launch(spec); + + String jobId = spec.getJobs().get(0).detail.id; + String postJobId = spec.getJobs().get(0).getPostJob().detail.id; + + assertEquals(JobState.PENDING, jobDao.getJobDetail(jobId).state); + assertTrue(jobManager.shutdownJob(jobManager.getJob(jobId))); + assertEquals(JobState.FINISHED, jobDao.getJobDetail(jobId).state); + assertEquals(JobState.PENDING, jobDao.getJobDetail(postJobId).state); + } + + @Test + @Transactional + @Rollback(true) + public void testReorderLayerFirst() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_2"); + + jobManager.reorderLayer(layer, new FrameSet("5-10"), Order.FIRST); + + assertEquals(-6, frameDao.findFrameDetail(job, "0005-pass_2").dispatchOrder); + assertEquals(-5, frameDao.findFrameDetail(job, "0006-pass_2").dispatchOrder); + assertEquals(-4, frameDao.findFrameDetail(job, "0007-pass_2").dispatchOrder); + assertEquals(-3, frameDao.findFrameDetail(job, "0008-pass_2").dispatchOrder); + assertEquals(-2, frameDao.findFrameDetail(job, "0009-pass_2").dispatchOrder); + assertEquals(-1, frameDao.findFrameDetail(job, "0010-pass_2").dispatchOrder); + assertEquals(3, frameDao.findFrameDetail(job, "0004-pass_2").dispatchOrder); + assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_2").dispatchOrder); + assertEquals(1, frameDao.findFrameDetail(job, "0002-pass_2").dispatchOrder); + assertEquals(0, frameDao.findFrameDetail(job, "0001-pass_2").dispatchOrder); + + DispatchHost host = createHost(); + jobManager.setJobPaused(job, false); + + String[] order = new String[] {"0005-pass_2", "0006-pass_2", "0007-pass_2", "0008-pass_2", + 
"0009-pass_2", "0010-pass_2", "0001-pass_1", "0001-pass_2"}; + + for (String f : order) { + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + assertEquals(f, frame.getName()); } + } - @Test - @Transactional - @Rollback(true) - public void testMisNamedJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); - assertEquals("pipe-dev.cue-testuser_pipe_dev.cue_testuser_blah_blah_v1",spec.getJobs().get(0).detail.name); - } + @Test + @Transactional + @Rollback(true) + public void testReorderLayerLast() { - @Test - @Transactional - @Rollback(true) - public void testMisNamedJob2() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); - assertEquals(spec.conformJobName("blah_____blah_v1"), - "pipe-dev.cue-testuser_blah_blah_v1"); - } + jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.LAST); - @Test - @Transactional - @Rollback(true) - public void testNonExistentShow() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_nonexistent_show.xml")); - try { - jobLauncher.launch(spec); - fail("Expected exception"); - } catch (EntityCreationError e) { - assertEquals(e.getMessage(), - "The nonexistentshow does not exist. Please contact administrator of your " + - "OpenCue deployment to have this show created."); - } - } + assertEquals(11, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); + assertEquals(12, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); + assertEquals(13, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); + assertEquals(14, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); + assertEquals(15, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); - @Test - @Transactional - @Rollback(true) - public void testPostFrameJobLaunch() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); + DispatchHost host = createHost(); + jobManager.setJobPaused(job, false); - String jobId = spec.getJobs().get(0).detail.id; - String postJobId = spec.getJobs().get(0).getPostJob().detail.id; + String[] order = new String[] {"0001-pass_2", "0002-pass_2", "0003-pass_2", "0004-pass_2", + "0005-pass_2", "0006-pass_1", "0006-pass_2", "0007-pass_1"}; - assertEquals(JobState.PENDING, jobDao.getJobDetail(jobId).state); - assertTrue(jobManager.shutdownJob(jobManager.getJob(jobId))); - assertEquals(JobState.FINISHED, jobDao.getJobDetail(jobId).state); - assertEquals(JobState.PENDING, jobDao.getJobDetail(postJobId).state); + for (String f : order) { + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + assertEquals(f, frame.getName()); } - - - @Test - @Transactional - @Rollback(true) - public void testReorderLayerFirst() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_2"); - - jobManager.reorderLayer(layer, new FrameSet("5-10"), Order.FIRST); - - assertEquals(-6, frameDao.findFrameDetail(job, "0005-pass_2").dispatchOrder); - assertEquals(-5, frameDao.findFrameDetail(job, "0006-pass_2").dispatchOrder); - assertEquals(-4, frameDao.findFrameDetail(job, "0007-pass_2").dispatchOrder); - assertEquals(-3, frameDao.findFrameDetail(job, "0008-pass_2").dispatchOrder); - 
assertEquals(-2, frameDao.findFrameDetail(job, "0009-pass_2").dispatchOrder); - assertEquals(-1, frameDao.findFrameDetail(job, "0010-pass_2").dispatchOrder); - assertEquals(3, frameDao.findFrameDetail(job, "0004-pass_2").dispatchOrder); - assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_2").dispatchOrder); - assertEquals(1, frameDao.findFrameDetail(job, "0002-pass_2").dispatchOrder); - assertEquals(0, frameDao.findFrameDetail(job, "0001-pass_2").dispatchOrder); - - DispatchHost host = createHost(); - jobManager.setJobPaused(job, false); - - String[] order = new String[] { - "0005-pass_2","0006-pass_2","0007-pass_2","0008-pass_2", - "0009-pass_2","0010-pass_2","0001-pass_1","0001-pass_2" - }; - - for (String f: order) { - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertEquals(f, frame.getName()); - } + } + + @Test + @Transactional + @Rollback(true) + public void testReorderLayerReverse() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + + jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.REVERSE); + + assertEquals(0, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); + assertEquals(1, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); + assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); + assertEquals(3, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); + assertEquals(4, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); + } + + @Test + @Transactional + @Rollback(true) + public void testStaggerLayer() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + FrameSet staggeredFrameSet = new FrameSet("1-10:2"); + jobManager.staggerLayer(layer, "1-10", 2); + + for (int i = 0; i < staggeredFrameSet.size(); i++) { + assertEquals(staggeredFrameSet.get(i), frameDao.findFrameDetail(job, + CueUtil.buildFrameName(layer, staggeredFrameSet.get(i))).number); } - @Test - @Transactional - @Rollback(true) - public void testReorderLayerLast() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.LAST); - - assertEquals(11, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); - assertEquals(12, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); - assertEquals(13, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); - assertEquals(14, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); - assertEquals(15, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); - - DispatchHost host = createHost(); - jobManager.setJobPaused(job, false); - - String[] order = new String[] { - "0001-pass_2","0002-pass_2","0003-pass_2","0004-pass_2", - "0005-pass_2","0006-pass_1","0006-pass_2","0007-pass_1" - }; - - for (String f: order) { - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertEquals(f, frame.getName()); - } - } - - @Test - @Transactional - @Rollback(true) - public void testReorderLayerReverse() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.REVERSE); - - assertEquals(0, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); - assertEquals(1, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); - assertEquals(2, 
frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); - assertEquals(3, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); - assertEquals(4, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); - } - - @Test - @Transactional - @Rollback(true) - public void testStaggerLayer() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - FrameSet staggeredFrameSet = new FrameSet("1-10:2"); - jobManager.staggerLayer(layer,"1-10",2); - - for (int i=0; i < staggeredFrameSet.size(); i++) { - assertEquals( - staggeredFrameSet.get(i), - frameDao.findFrameDetail( - job, CueUtil.buildFrameName(layer, staggeredFrameSet.get(i))).number); - } - - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayers() { - JobDetail job = getJob1(); - jobManager.getLayerDetails(job); - jobManager.getLayers(job); - } - - @Test - @Transactional - @Rollback(true) - public void eatLayer() { - JobInterface job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - FrameSearchInterface r = frameSearchFactory.create(layer); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .setPage(1) - .setLimit(5) - .build()); - jobManagerSupport.eatFrames(r, new Source()); - - assertTrue( - frameDao.findFrameDetails(frameSearchFactory.create(layer)) - .stream() - .allMatch(frame -> frame.state == FrameState.EATEN)); - } - - @Test - @Transactional - @Rollback(true) - public void optimizeLayer() { - JobInterface job = getJob3(); - LayerDetail layer = layerDao.findLayerDetail(job, "pass_1"); - - // Hardcoded value of dispatcher.memory.mem_reserved_default - // to avoid having to read opencue.properties on a test setting - long memReservedDefault = 3355443; - - assertEquals(memReservedDefault, layer.minimumMemory); - assertThat(layer.tags, contains("general")); - - /* - * Make sure the layer is optimizable. 
- */ - frameDao.findFrames(frameSearchFactory.create(layer)) - .stream() - .limit(5) - .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); - layerDao.updateUsage(layer, new ResourceUsage(100, 3500 * 5, 0), 0); - - // Test to make sure our optimization - jobManager.optimizeLayer(layer, 100, CueUtil.MB512, 120); - - assertEquals( - CueUtil.MB512 + CueUtil.MB256, - layerDao.findLayerDetail(job, "pass_1").minimumMemory); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerThreadable() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - assertFalse(jobManager.isLayerThreadable(layer)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayer() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - assertEquals(layer, jobManager.getLayer(layer.getId())); - } - - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - FrameInterface frame = jobManager.findFrame(layer, 1); - assertEquals("0001-pass_1", frame.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testAddLayerLimit() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - jobManager.addLayerLimit(layer, "0001-limit-1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerLimits() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - jobManager.getLayerLimits(layer); - } + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayers() { + JobDetail job = getJob1(); + jobManager.getLayerDetails(job); + jobManager.getLayers(job); + } + + @Test + @Transactional + @Rollback(true) + public void eatLayer() { + JobInterface job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + FrameSearchInterface r = frameSearchFactory.create(layer); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().setPage(1).setLimit(5).build()); + jobManagerSupport.eatFrames(r, new Source()); + + assertTrue(frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() + .allMatch(frame -> frame.state == FrameState.EATEN)); + } + + @Test + @Transactional + @Rollback(true) + public void optimizeLayer() { + JobInterface job = getJob3(); + LayerDetail layer = layerDao.findLayerDetail(job, "pass_1"); + + // Hardcoded value of dispatcher.memory.mem_reserved_default + // to avoid having to read opencue.properties on a test setting + long memReservedDefault = 3355443; + + assertEquals(memReservedDefault, layer.minimumMemory); + assertThat(layer.tags, contains("general")); + + /* + * Make sure the layer is optimizable. 
+ */ + frameDao.findFrames(frameSearchFactory.create(layer)).stream().limit(5) + .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); + layerDao.updateUsage(layer, new ResourceUsage(100, 3500 * 5, 0), 0); + + // Test to make sure our optimization + jobManager.optimizeLayer(layer, 100, CueUtil.MB512, 120); + + assertEquals(CueUtil.MB512 + CueUtil.MB256, + layerDao.findLayerDetail(job, "pass_1").minimumMemory); + } + + @Test + @Transactional + @Rollback(true) + public void testIsLayerThreadable() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + + assertFalse(jobManager.isLayerThreadable(layer)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayer() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + assertEquals(layer, jobManager.getLayer(layer.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrame() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + + FrameInterface frame = jobManager.findFrame(layer, 1); + assertEquals("0001-pass_1", frame.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testAddLayerLimit() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + jobManager.addLayerLimit(layer, "0001-limit-1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerLimits() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + jobManager.getLayerLimits(layer); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java index 87b3c415c..4e7863ae2 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - package com.imageworks.spcue.test.service; import java.io.IOException; @@ -40,91 +37,86 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobSpecTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; - - private static String readJobSpec(String name) - { - String path = "src/test/resources/conf/jobspec/" + name; - byte[] encoded = null; - - try { - encoded = Files.readAllBytes(Paths.get(path)); - } catch (IOException e) { - fail("readJobSpec should succeed to read jobspec file"); - } - - return new String(encoded, StandardCharsets.UTF_8); - } + @Resource + JobLauncher jobLauncher; - @Test - public void testParseSuccess() { - String xml = readJobSpec("jobspec_1_10.xml"); - JobSpec spec = jobLauncher.parse(xml); - assertEquals(spec.getDoc().getDocType().getPublicID(), - "SPI Cue Specification Language"); - assertEquals(spec.getDoc().getDocType().getSystemID(), - "http://localhost:8080/spcue/dtd/cjsl-1.10.dtd"); - assertEquals(spec.getJobs().size(), 1); - assertEquals(spec.getJobs().get(0).detail.name, "testing-default-testuser_test"); - } + private static String readJobSpec(String name) { + String path = "src/test/resources/conf/jobspec/" + name; + byte[] encoded = null; - @Test - public void testParseNonExistent() { - String xml = readJobSpec("jobspec_nonexistent_dtd.xml"); - try { - jobLauncher.parse(xml); - fail("Expected exception"); - } catch (SpecBuilderException e) { - assertTrue(e.getMessage().startsWith("Failed to parse job spec XML, java.net.MalformedURLException")); - } + try { + encoded = Files.readAllBytes(Paths.get(path)); + } catch (IOException e) { + fail("readJobSpec should succeed to read jobspec file"); } - @Test - public void testParseInvalidShot() { - String xml = readJobSpec("jobspec_invalid_shot.xml"); - try { - jobLauncher.parse(xml); - fail("Expected exception"); - } catch (SpecBuilderException e) { - assertEquals(e.getMessage(), - "The shot name: invalid/shot is not in the proper format. 
" + - "Shot names must be alpha numeric, no dashes or punctuation."); - } + return new String(encoded, StandardCharsets.UTF_8); + } + + @Test + public void testParseSuccess() { + String xml = readJobSpec("jobspec_1_10.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.10.dtd"); + assertEquals(spec.getJobs().size(), 1); + assertEquals(spec.getJobs().get(0).detail.name, "testing-default-testuser_test"); + } + + @Test + public void testParseNonExistent() { + String xml = readJobSpec("jobspec_nonexistent_dtd.xml"); + try { + jobLauncher.parse(xml); + fail("Expected exception"); + } catch (SpecBuilderException e) { + assertTrue(e.getMessage() + .startsWith("Failed to parse job spec XML, java.net.MalformedURLException")); } - - @Test - public void testParseGpuSuccess() { - String xml = readJobSpec("jobspec_1_12.xml"); - JobSpec spec = jobLauncher.parse(xml); - assertEquals(spec.getDoc().getDocType().getPublicID(), - "SPI Cue Specification Language"); - assertEquals(spec.getDoc().getDocType().getSystemID(), - "http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); - assertEquals(spec.getJobs().size(), 1); - BuildableJob job = spec.getJobs().get(0); - assertEquals(job.detail.name, "testing-default-testuser_test"); - LayerDetail layer = job.getBuildableLayers().get(0).layerDetail; - assertEquals(layer.getMinimumGpus(), 1); - assertEquals(layer.getMinimumGpuMemory(), 1048576); - } - - @Test - public void testParseMaxCoresAndMaxGpus() { - String xml = readJobSpec("jobspec_1_13.xml"); - JobSpec spec = jobLauncher.parse(xml); - assertEquals(spec.getDoc().getDocType().getPublicID(), - "SPI Cue Specification Language"); - assertEquals(spec.getDoc().getDocType().getSystemID(), - "http://localhost:8080/spcue/dtd/cjsl-1.13.dtd"); - assertEquals(spec.getJobs().size(), 1); - BuildableJob job = spec.getJobs().get(0); - assertEquals(job.maxCoresOverride, Integer.valueOf(420)); - assertEquals(job.maxGpusOverride, Integer.valueOf(42)); + } + + @Test + public void testParseInvalidShot() { + String xml = readJobSpec("jobspec_invalid_shot.xml"); + try { + jobLauncher.parse(xml); + fail("Expected exception"); + } catch (SpecBuilderException e) { + assertEquals(e.getMessage(), "The shot name: invalid/shot is not in the proper format. 
" + + "Shot names must be alpha numeric, no dashes or punctuation."); } + } + + @Test + public void testParseGpuSuccess() { + String xml = readJobSpec("jobspec_1_12.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); + assertEquals(spec.getJobs().size(), 1); + BuildableJob job = spec.getJobs().get(0); + assertEquals(job.detail.name, "testing-default-testuser_test"); + LayerDetail layer = job.getBuildableLayers().get(0).layerDetail; + assertEquals(layer.getMinimumGpus(), 1); + assertEquals(layer.getMinimumGpuMemory(), 1048576); + } + + @Test + public void testParseMaxCoresAndMaxGpus() { + String xml = readJobSpec("jobspec_1_13.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.13.dtd"); + assertEquals(spec.getJobs().size(), 1); + BuildableJob job = spec.getJobs().get(0); + assertEquals(job.maxCoresOverride, Integer.valueOf(420)); + assertEquals(job.maxGpusOverride, Integer.valueOf(42)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java index edb55f519..ff4188cfb 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.service; import javax.annotation.Resource; @@ -31,21 +27,19 @@ import com.imageworks.spcue.service.MaintenanceManagerSupport; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class MaintenanceManagerSupportTests extends - AbstractTransactionalJUnit4SpringContextTests { - - @Resource - MaintenanceManagerSupport maintenanceManager; - - @Test - public void testCheckHardwareState() { - maintenanceManager.checkHardwareState(); - } - - @Test - public void archiveFinishedJobs() { - maintenanceManager.archiveFinishedJobs(); - } -} +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class MaintenanceManagerSupportTests extends AbstractTransactionalJUnit4SpringContextTests { + @Resource + MaintenanceManagerSupport maintenanceManager; + + @Test + public void testCheckHardwareState() { + maintenanceManager.checkHardwareState(); + } + + @Test + public void archiveFinishedJobs() { + maintenanceManager.archiveFinishedJobs(); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java index 51dcafec4..8b712a6a6 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java @@ -2,20 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.service; import javax.annotation.Resource; @@ -46,147 +43,122 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class OwnerManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - OwnerManager ownerManager; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - DeedDao deedDao; - - @Resource - Whiteboard whiteboard; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(CueUtil.GB4) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(true) - .setNumProcs(2) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("general") - .setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512) - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testCreateOwner() { - ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - OwnerEntity owner = ownerManager.findOwner("spongebob"); - assertEquals(owner.name, "spongebob"); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteOwner() { - ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - assertTrue(ownerManager.deleteOwner( - ownerManager.findOwner("spongebob"))); - } - - @Test - @Transactional - @Rollback(true) - public void testGetOwner() { - OwnerEntity o1 = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - OwnerEntity o2 = ownerManager.getOwner(o1.id); - assertEquals(o1, o2); - } - - @Test - @Transactional - @Rollback(true) - public void testFindOwner() { - OwnerEntity o1 = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - OwnerEntity o2 = ownerManager.findOwner(o1.name); - assertEquals(o1, o2); - } - - @Test - @Transactional - @Rollback(true) - public void testSetShow() { - OwnerEntity o = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - ShowEntity newShow = adminManager.findShowEntity("edu"); - ownerManager.setShow(o, newShow); - - assertEquals(newShow.name, whiteboard.getOwner(o.name).getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testTakeOwnership() { - OwnerEntity o = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - ownerManager.takeOwnership(o, host); - } - - - @Test - @Transactional - @Rollback(true) - public void testGetDeed() { - OwnerEntity o = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - DeedEntity d = ownerManager.takeOwnership(o, host); - - assertEquals(d, ownerManager.getDeed(d.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testRemoveDeed() { - OwnerEntity o = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - DeedEntity d = ownerManager.takeOwnership(o, host); - - ownerManager.removeDeed(d); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class OwnerManagerTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + OwnerManager ownerManager; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + DeedDao deedDao; + + @Resource + Whiteboard whiteboard; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) + .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) + .setFacility("spi").addTags("general").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + @Test + @Transactional + @Rollback(true) + public void testCreateOwner() { + ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + OwnerEntity owner = ownerManager.findOwner("spongebob"); + assertEquals(owner.name, "spongebob"); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteOwner() { + ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + assertTrue(ownerManager.deleteOwner(ownerManager.findOwner("spongebob"))); + } + + @Test + @Transactional + @Rollback(true) + public void testGetOwner() { + OwnerEntity o1 = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + OwnerEntity o2 = ownerManager.getOwner(o1.id); + assertEquals(o1, o2); + } + + @Test + @Transactional + @Rollback(true) + public void testFindOwner() { + OwnerEntity o1 = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + OwnerEntity o2 = ownerManager.findOwner(o1.name); + assertEquals(o1, o2); + } + + @Test + @Transactional + @Rollback(true) + public void testSetShow() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + ShowEntity newShow = adminManager.findShowEntity("edu"); + ownerManager.setShow(o, newShow); + + assertEquals(newShow.name, whiteboard.getOwner(o.name).getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testTakeOwnership() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + DispatchHost host = createHost(); + ownerManager.takeOwnership(o, host); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDeed() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + DispatchHost host = createHost(); + DeedEntity d = ownerManager.takeOwnership(o, host); + + assertEquals(d, ownerManager.getDeed(d.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testRemoveDeed() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + DispatchHost host = createHost(); + DeedEntity d = ownerManager.takeOwnership(o, host); + + ownerManager.removeDeed(d); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java index 5354d763e..0217dd8e8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java @@ -2,23 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. 
You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.service; - import java.io.File; import javax.annotation.Resource; @@ -47,140 +43,136 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; - @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ServiceManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Resource - ServiceManager serviceManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - LayerDao layerDao; - - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } - - @Test - @Transactional - @Rollback(true) - public void testGetDefaultService() { - ServiceEntity srv1 = serviceManager.getService("default"); - ServiceEntity srv2 = serviceManager.getDefaultService(); - - assertEquals(srv1, srv2); - } - - @Test - @Transactional - @Rollback(true) - public void testCreateService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB2; - s.threadable = false; - s.timeout = 0; - s.timeout_llu = 0; - s.tags.addAll(Sets.newHashSet("general")); - serviceManager.createService(s); - - ServiceEntity newService = serviceManager.getService(s.id); - assertEquals(s, newService); - } - - @Test - @Transactional - @Rollback(true) - public void testOverrideExistingService() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "arnold"; - s.minCores = 400; - s.timeout = 10; - s.timeout_llu = 10; - s.minMemory = CueUtil.GB8; - s.minGpuMemory = CueUtil.GB2; - s.threadable = false; - s.tags.addAll(Sets.newHashSet("general")); - s.showId = "00000000-0000-0000-0000-000000000000"; - serviceManager.createService(s); - - // Check it was overridden - ServiceEntity newService = serviceManager.getService("arnold", s.showId); - assertEquals(s, newService); - assertEquals(400, newService.minCores); - assertEquals(10, newService.timeout); - assertEquals(10, newService.timeout_llu); - assertEquals(CueUtil.GB8, newService.minMemory); - assertEquals(CueUtil.GB2, newService.minGpuMemory); - assertFalse(newService.threadable); - assertTrue(s.tags.contains("general")); - - serviceManager.deleteService(s); - - // now check the original is back. 
- newService = serviceManager.getService("arnold", s.showId); - assertEquals(100, newService.minCores); - assertEquals(0, newService.minGpuMemory); - } - - @Test - @Transactional - @Rollback(true) - public void testJobLaunch() { - - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/services.xml")); - jobLauncher.launch(spec); - - ServiceEntity shell = serviceManager.getService("shell"); - ServiceEntity prman = serviceManager.getService("prman"); - ServiceEntity cuda = serviceManager.getService("cuda"); - LayerDetail shellLayer = layerDao.getLayerDetail( - spec.getJobs().get(0).getBuildableLayers().get(0).layerDetail.id); - LayerDetail prmanLayer = layerDao.getLayerDetail( - spec.getJobs().get(0).getBuildableLayers().get(1).layerDetail.id); - LayerDetail cudaLayer = layerDao.getLayerDetail( - spec.getJobs().get(0).getBuildableLayers().get(3).layerDetail.id); - - assertEquals(shell.minCores, shellLayer.minimumCores); - assertEquals(shell.minMemory, shellLayer.minimumMemory); - assertEquals(shell.minGpuMemory, shellLayer.minimumGpuMemory); - assertFalse(shellLayer.isThreadable); - assertEquals(shell.tags, shellLayer.tags); - assertThat(shellLayer.services, contains("shell", "katana", "unknown")); - - assertEquals(prman.minCores, prmanLayer.minimumCores); - assertEquals(prman.minMemory, prmanLayer.minimumMemory); - assertFalse(prmanLayer.isThreadable); - assertEquals(prman.tags, prmanLayer.tags); - assertThat(prmanLayer.services, contains("prman", "katana")); - - assertEquals(cuda.minCores, cudaLayer.minimumCores); - assertEquals(cuda.minMemory, cudaLayer.minimumMemory); - assertEquals(cuda.minGpuMemory, cudaLayer.minimumGpuMemory); - assertFalse(cudaLayer.isThreadable); - assertEquals(cuda.tags, cudaLayer.tags); - assertThat(cudaLayer.services, contains("cuda")); - } - - @Test - @Transactional - @Rollback(true) - public void testManualOverrideThreading() { - - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); - jobLauncher.launch(spec); - - assertFalse( - layerDao.findLayerDetail( - spec.getJobs().get(0).detail, "arnold_layer").isThreadable); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class ServiceManagerTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + ServiceManager serviceManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + LayerDao layerDao; + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + + @Test + @Transactional + @Rollback(true) + public void testGetDefaultService() { + ServiceEntity srv1 = serviceManager.getService("default"); + ServiceEntity srv2 = serviceManager.getDefaultService(); + + assertEquals(srv1, srv2); + } + + @Test + @Transactional + @Rollback(true) + public void testCreateService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB2; + s.threadable = false; + s.timeout = 0; + s.timeout_llu = 0; + s.tags.addAll(Sets.newHashSet("general")); + serviceManager.createService(s); + + ServiceEntity newService = serviceManager.getService(s.id); + assertEquals(s, newService); + } + + @Test + @Transactional + @Rollback(true) + public void testOverrideExistingService() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "arnold"; + s.minCores = 400; + s.timeout = 10; + s.timeout_llu = 10; + s.minMemory = CueUtil.GB8; + s.minGpuMemory = CueUtil.GB2; + s.threadable = 
false; + s.tags.addAll(Sets.newHashSet("general")); + s.showId = "00000000-0000-0000-0000-000000000000"; + serviceManager.createService(s); + + // Check it was overridden + ServiceEntity newService = serviceManager.getService("arnold", s.showId); + assertEquals(s, newService); + assertEquals(400, newService.minCores); + assertEquals(10, newService.timeout); + assertEquals(10, newService.timeout_llu); + assertEquals(CueUtil.GB8, newService.minMemory); + assertEquals(CueUtil.GB2, newService.minGpuMemory); + assertFalse(newService.threadable); + assertTrue(s.tags.contains("general")); + + serviceManager.deleteService(s); + + // now check the original is back. + newService = serviceManager.getService("arnold", s.showId); + assertEquals(100, newService.minCores); + assertEquals(0, newService.minGpuMemory); + } + + @Test + @Transactional + @Rollback(true) + public void testJobLaunch() { + + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); + jobLauncher.launch(spec); + + ServiceEntity shell = serviceManager.getService("shell"); + ServiceEntity prman = serviceManager.getService("prman"); + ServiceEntity cuda = serviceManager.getService("cuda"); + LayerDetail shellLayer = + layerDao.getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(0).layerDetail.id); + LayerDetail prmanLayer = + layerDao.getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(1).layerDetail.id); + LayerDetail cudaLayer = + layerDao.getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(3).layerDetail.id); + + assertEquals(shell.minCores, shellLayer.minimumCores); + assertEquals(shell.minMemory, shellLayer.minimumMemory); + assertEquals(shell.minGpuMemory, shellLayer.minimumGpuMemory); + assertFalse(shellLayer.isThreadable); + assertEquals(shell.tags, shellLayer.tags); + assertThat(shellLayer.services, contains("shell", "katana", "unknown")); + + assertEquals(prman.minCores, prmanLayer.minimumCores); + assertEquals(prman.minMemory, prmanLayer.minimumMemory); + assertFalse(prmanLayer.isThreadable); + assertEquals(prman.tags, prmanLayer.tags); + assertThat(prmanLayer.services, contains("prman", "katana")); + + assertEquals(cuda.minCores, cudaLayer.minimumCores); + assertEquals(cuda.minMemory, cudaLayer.minimumMemory); + assertEquals(cuda.minGpuMemory, cudaLayer.minimumGpuMemory); + assertFalse(cudaLayer.isThreadable); + assertEquals(cuda.tags, cudaLayer.tags); + assertThat(cudaLayer.services, contains("cuda")); + } + + @Test + @Transactional + @Rollback(true) + public void testManualOverrideThreading() { + + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); + jobLauncher.launch(spec); + + assertFalse( + layerDao.findLayerDetail(spec.getJobs().get(0).detail, "arnold_layer").isThreadable); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java index 88cd2acd4..2c3a9e694 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.service; import java.io.File; @@ -38,30 +34,28 @@ import static org.junit.Assert.assertTrue; @Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class WhiteboardTests extends - AbstractTransactionalJUnit4SpringContextTests { - - @Resource - Whiteboard whiteboard; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobPending() { - JobDetail job = launchJob(); - assertTrue(whiteboard.isJobPending(job.getName())); - } +@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) +public class WhiteboardTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + Whiteboard whiteboard; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobPending() { + JobDetail job = launchJob(); + assertTrue(whiteboard.isJobPending(job.getName())); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java index d6796b76e..5db5fa7f7 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java @@ -2,24 +2,19 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.util; - import junit.framework.TestCase; import org.junit.Before; @@ -30,29 +25,28 @@ public class CoreSaturationTests extends TestCase { - DispatchHost host; - - @Before - public void setUp() throws Exception { - host = new DispatchHost(); - host.isNimby = false; - } - - public void testCoreAndMemorySaturation1() { - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 700; - - DispatchFrame frame = new DispatchFrame(); - frame.services = "NOTarnold"; - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB * 7); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(700, proc.coresReserved); - assertEquals(CueUtil.GB * 7, proc.memoryReserved); - } + DispatchHost host; + + @Before + public void setUp() throws Exception { + host = new DispatchHost(); + host.isNimby = false; + } + + public void testCoreAndMemorySaturation1() { + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 700; + + DispatchFrame frame = new DispatchFrame(); + frame.services = "NOTarnold"; + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB * 7); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(700, proc.coresReserved); + assertEquals(CueUtil.GB * 7, proc.memoryReserved); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java index d64f42cfc..ab634fda2 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. 
*/ - - package com.imageworks.spcue.test.util; import junit.framework.TestCase; @@ -30,126 +26,124 @@ public class CoreSpanTests extends TestCase { - DispatchHost host; - - @Before - public void setUp() throws Exception { - host = new DispatchHost(); - host.isNimby = false; - } - - /** - * The coreSpan calculation finds out how many cores a frames's - * requested memory covers and gives more cores when the requested - * memory spans more than 1 core. - */ - public void testCoreSpan() { - - /* 8 gigs and 7 cores idle, request 7g */ - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 700; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB * 7); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(700, proc.coresReserved); - } - - public void testCoreSpanTest1(){ - - /* 4 gigs and 1 cores idle, request 1g */ - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB4; - host.cores = 800; - host.idleCores = 100; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB); - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(100, proc.coresReserved); - } - - public void testCoreSpanTest2() { - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB4; - host.cores = 800; - host.idleCores = 200; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB4); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(200, proc.coresReserved); - } - - public void testCoreSpanTest3() { - host.memory = CueUtil.GB8; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 780; - // Hardcoded value of dispatcher.memory.mem_reserved_default - // to avoid having to read opencue.properties on a test setting - long memReservedDefault = 3355443; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(memReservedDefault); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(300, proc.coresReserved); - } - - public void testCoreSpanTest4() { - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB16; - host.cores = 800; - host.idleCores = 200; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB * 8); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(200, proc.coresReserved); - - } - - public void testBuildVirtualProc() { - VirtualProc proc; - - DispatchHost host = new DispatchHost(); - host.threadMode = ThreadMode.ALL_VALUE; - /* 8 gigs and 7 cores idle, request 7g */ - host.memory = CueUtil.GB8; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 800; - // Hardcoded value of dispatcher.memory.mem_reserved_default - // to avoid having to read opencue.properties on a test setting - long memReservedDefault = 3355443; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(memReservedDefault); - frame.threadable = true; - - proc = VirtualProc.build(host, frame); - assertEquals(800, proc.coresReserved); - - host.threadMode = ThreadMode.AUTO_VALUE; - proc = VirtualProc.build(host, frame); - assertEquals(300, proc.coresReserved); - } + DispatchHost host; + + @Before + public void setUp() throws Exception { + host = new DispatchHost(); + host.isNimby = false; + } + + /** + * The 
coreSpan calculation finds out how many cores a frames's requested memory covers and gives + * more cores when the requested memory spans more than 1 core. + */ + public void testCoreSpan() { + + /* 8 gigs and 7 cores idle, request 7g */ + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 700; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB * 7); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(700, proc.coresReserved); + } + + public void testCoreSpanTest1() { + + /* 4 gigs and 1 cores idle, request 1g */ + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB4; + host.cores = 800; + host.idleCores = 100; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB); + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(100, proc.coresReserved); + } + + public void testCoreSpanTest2() { + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB4; + host.cores = 800; + host.idleCores = 200; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB4); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(200, proc.coresReserved); + } + + public void testCoreSpanTest3() { + host.memory = CueUtil.GB8; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 780; + // Hardcoded value of dispatcher.memory.mem_reserved_default + // to avoid having to read opencue.properties on a test setting + long memReservedDefault = 3355443; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(memReservedDefault); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(300, proc.coresReserved); + } + + public void testCoreSpanTest4() { + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB16; + host.cores = 800; + host.idleCores = 200; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB * 8); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(200, proc.coresReserved); + + } + + public void testBuildVirtualProc() { + VirtualProc proc; + + DispatchHost host = new DispatchHost(); + host.threadMode = ThreadMode.ALL_VALUE; + /* 8 gigs and 7 cores idle, request 7g */ + host.memory = CueUtil.GB8; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 800; + // Hardcoded value of dispatcher.memory.mem_reserved_default + // to avoid having to read opencue.properties on a test setting + long memReservedDefault = 3355443; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(memReservedDefault); + frame.threadable = true; + + proc = VirtualProc.build(host, frame); + assertEquals(800, proc.coresReserved); + + host.threadMode = ThreadMode.AUTO_VALUE; + proc = VirtualProc.build(host, frame); + assertEquals(300, proc.coresReserved); + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java index d3a4abe76..348e514e1 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java @@ -2,21 +2,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the 
Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - - package com.imageworks.spcue.test.util; import java.util.List; @@ -29,141 +25,129 @@ public class CueUtilTester extends TestCase { - @Test - public void testFindChunk() { - - List dependOnFrameSet = - CueUtil.normalizeFrameRange("101-160x10", 1); - List dependErFrameSet = - CueUtil.normalizeFrameRange("101-160", 1); - - Integer[] results = new Integer[] { - 101,101,101,101,101,101,101,101,101,101, - 111,111,111,111,111,111,111,111,111,111, - 121,121,121,121,121,121,121,121,121,121, - 131,131,131,131,131,131,131,131,131,131, - 141,141,141,141,141,141,141,141,141,141, - 151,151,151,151,151,151,151,151,151,151}; - - for (int dependErFrameSetIdx = 0; - dependErFrameSetIdx < dependErFrameSet.size(); - dependErFrameSetIdx = dependErFrameSetIdx + 1) { - - int result = CueUtil.findChunk(dependOnFrameSet, - dependErFrameSet.get(dependErFrameSetIdx)); - assertEquals((int)results[dependErFrameSetIdx], result); - } + @Test + public void testFindChunk() { + + List dependOnFrameSet = CueUtil.normalizeFrameRange("101-160x10", 1); + List dependErFrameSet = CueUtil.normalizeFrameRange("101-160", 1); + + Integer[] results = new Integer[] {101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 111, 111, + 111, 111, 111, 111, 111, 111, 111, 111, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, + 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 141, 141, 141, 141, 141, 141, 141, 141, + 141, 141, 151, 151, 151, 151, 151, 151, 151, 151, 151, 151}; + + for (int dependErFrameSetIdx = 0; dependErFrameSetIdx < dependErFrameSet + .size(); dependErFrameSetIdx = dependErFrameSetIdx + 1) { + + int result = CueUtil.findChunk(dependOnFrameSet, dependErFrameSet.get(dependErFrameSetIdx)); + assertEquals((int) results[dependErFrameSetIdx], result); } + } + + @Test + public void testFindChunkStaggered() { + + List dependOnFrameSet = CueUtil.normalizeFrameRange("101-110:2", 1); + List dependErFrameSet = CueUtil.normalizeFrameRange("101-110", 1); - @Test - public void testFindChunkStaggered() { - - List dependOnFrameSet = - CueUtil.normalizeFrameRange("101-110:2", 1); - List dependErFrameSet = - CueUtil.normalizeFrameRange("101-110", 1); - - Integer[] results = new Integer[] { - 101,102,103,104,105,106,107,108,109,110 - }; - for (int i=0; i < dependErFrameSet.size(); i=i+1) { - int result = CueUtil.findChunk(dependOnFrameSet,dependErFrameSet.get(i)); - assertEquals((int)results[i], result); - } + Integer[] results = new Integer[] {101, 102, 103, 104, 105, 106, 
107, 108, 109, 110}; + for (int i = 0; i < dependErFrameSet.size(); i = i + 1) { + int result = CueUtil.findChunk(dependOnFrameSet, dependErFrameSet.get(i)); + assertEquals((int) results[i], result); } + } - /** - * Removes all duplicates from the frame range, applies - * the chunk size, and maintains dispatch order. + /** + * Removes all duplicates from the frame range, applies the chunk size, and maintains dispatch + * order. + */ + public void testNormalizeFrameRange() { + + /* + * An array of frame numbers which is the known result */ - public void testNormalizeFrameRange() { - - /* - * An array of frame numbers which is the known result - */ - int[] knownResult; - - /* - * An array of frames returned from normalizeFrameRange - */ - List frames; - - /* - * Normal every day frame range. - */ - knownResult = new int[] { 1,2,3,4,5 }; - frames = CueUtil.normalizeFrameRange("1-5", 1); - for (int i=0; i frames; - @Test - public void testCoreUnitsToCores() { - assertEquals(1.0f, Convert.coreUnitsToCores(100), 0.0001f); + /* + * Normal every day frame range. + */ + knownResult = new int[] {1, 2, 3, 4, 5}; + frames = CueUtil.normalizeFrameRange("1-5", 1); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); } - @Test - public void testCoreUnitsToCoresWithScale() { - assertEquals(100, Convert.coresToWholeCoreUnits(1.132132f)); - assertEquals(19900, Convert.coresToWholeCoreUnits(199.232f)); + /* + * Frame range with chunking + */ + knownResult = new int[] {1, 5, 9}; + frames = CueUtil.normalizeFrameRange("1-10", 4); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); } - @Test - public void testBuildProcName() { - assertEquals("drack100/1.00/1", CueUtil.buildProcName("drack100",100,1)); - assertEquals("drack100/1.40/0", CueUtil.buildProcName("drack100",140,0)); - assertEquals("drack100/2.01/2", CueUtil.buildProcName("drack100",201,2)); + /* + * Frame range with duplicates... + */ + knownResult = new int[] {1, 3, 5, 7, 9, 2, 4, 6, 8, 10}; + frames = CueUtil.normalizeFrameRange("1-10x2,1-10", 1); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); } - @Test - public void testCoreUnitsToWholecores() { - float cores = Convert.coreUnitsToWholeCores(149); - assertEquals(1.0f, cores); + /* + * Frame range with duplicates..with chunking! + */ + knownResult = new int[] {1, 5, 9, 4, 8}; + frames = CueUtil.normalizeFrameRange("1-10x2,1-10", 2); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); + } + /* + * Frame range with no duplicates..with chunking! 
+ */ + knownResult = new int[] {1, 5, 9, 4, 8}; + frames = CueUtil.normalizeFrameRange("1-10:2", 2); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); } + } + + @Test + public void testProcsToCores() { + assertEquals(200, Convert.coresToCoreUnits(2.0f)); + assertEquals(235, Convert.coresToCoreUnits(2.35f)); + assertEquals(299, Convert.coresToCoreUnits(2.999f)); + } + + @Test + public void testCoreUnitsToCores() { + assertEquals(1.0f, Convert.coreUnitsToCores(100), 0.0001f); + } + + @Test + public void testCoreUnitsToCoresWithScale() { + assertEquals(100, Convert.coresToWholeCoreUnits(1.132132f)); + assertEquals(19900, Convert.coresToWholeCoreUnits(199.232f)); + } + + @Test + public void testBuildProcName() { + assertEquals("drack100/1.00/1", CueUtil.buildProcName("drack100", 100, 1)); + assertEquals("drack100/1.40/0", CueUtil.buildProcName("drack100", 140, 0)); + assertEquals("drack100/2.01/2", CueUtil.buildProcName("drack100", 201, 2)); + } + + @Test + public void testCoreUnitsToWholecores() { + float cores = Convert.coreUnitsToWholeCores(149); + assertEquals(1.0f, cores); + + } } - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java index aa9cfcc07..279c3a664 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java @@ -8,185 +8,185 @@ import static org.junit.Assert.fail; public class FrameRangeTests { - @Test - public void testSingleFrame() { - Integer frame = 4927; + @Test + public void testSingleFrame() { + Integer frame = 4927; - FrameRange result = new FrameRange(frame.toString()); + FrameRange result = new FrameRange(frame.toString()); - assertThat(result.getAll()).containsExactly(frame); - } + assertThat(result.getAll()).containsExactly(frame); + } - @Test - public void testNegativeSingleFrame() { - Integer frame = -4982; + @Test + public void testNegativeSingleFrame() { + Integer frame = -4982; - FrameRange result = new FrameRange(frame.toString()); + FrameRange result = new FrameRange(frame.toString()); - assertThat(result.getAll()).containsExactly(frame); - } + assertThat(result.getAll()).containsExactly(frame); + } - @Test - public void testFrameRange() { - FrameRange result = new FrameRange("1-7"); + @Test + public void testFrameRange() { + FrameRange result = new FrameRange("1-7"); - assertThat(result.getAll()).containsExactly(1, 2, 3, 4, 5, 6, 7); - } + assertThat(result.getAll()).containsExactly(1, 2, 3, 4, 5, 6, 7); + } - @Test - public void testNegativeFrameRange() { - FrameRange result = new FrameRange("-20--13"); + @Test + public void testNegativeFrameRange() { + FrameRange result = new FrameRange("-20--13"); - assertThat(result.getAll()).containsExactly(-20, -19, -18, -17, -16, -15, -14, -13); - } + assertThat(result.getAll()).containsExactly(-20, -19, -18, -17, -16, -15, -14, -13); + } - @Test - public void testNegativeToPositiveFrameRange() { - FrameRange result = new FrameRange("-5-3"); + @Test + public void testNegativeToPositiveFrameRange() { + FrameRange result = new FrameRange("-5-3"); - assertThat(result.getAll()).containsExactly(-5, -4, -3, -2, -1, 0, 1, 2, 3); - } + assertThat(result.getAll()).containsExactly(-5, -4, -3, -2, -1, 0, 1, 2, 3); + } - @Test - public void testReverseFrameRange() { - FrameRange result = new FrameRange("6-2"); + @Test + public void testReverseFrameRange() { + FrameRange 
result = new FrameRange("6-2"); - assertThat(result.getAll()).containsExactly(6, 5, 4, 3, 2); - } + assertThat(result.getAll()).containsExactly(6, 5, 4, 3, 2); + } - @Test - public void testReverseNegativeFrameRange() { - FrameRange result = new FrameRange("-2--6"); + @Test + public void testReverseNegativeFrameRange() { + FrameRange result = new FrameRange("-2--6"); - assertThat(result.getAll()).containsExactly(-2, -3, -4, -5, -6); - } + assertThat(result.getAll()).containsExactly(-2, -3, -4, -5, -6); + } - @Test - public void testStep() { - FrameRange result = new FrameRange("1-8x2"); + @Test + public void testStep() { + FrameRange result = new FrameRange("1-8x2"); - assertThat(result.getAll()).containsExactly(1, 3, 5, 7); - } + assertThat(result.getAll()).containsExactly(1, 3, 5, 7); + } - @Test - public void testNegativeStep() { - FrameRange result = new FrameRange("8-1x-2"); + @Test + public void testNegativeStep() { + FrameRange result = new FrameRange("8-1x-2"); - assertThat(result.getAll()).containsExactly(8, 6, 4, 2); - } + assertThat(result.getAll()).containsExactly(8, 6, 4, 2); + } - @Test - public void testNegativeStepInvalidRange() { - try { - new FrameRange("1-8x-2"); - fail("negative frame step should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } + @Test + public void testNegativeStepInvalidRange() { + try { + new FrameRange("1-8x-2"); + fail("negative frame step should have been rejected"); + } catch (IllegalArgumentException e) { + // pass } + } - @Test - public void testInvertedStep() { - FrameRange result = new FrameRange("1-8y2"); + @Test + public void testInvertedStep() { + FrameRange result = new FrameRange("1-8y2"); - assertThat(result.getAll()).containsExactly(2, 4, 6, 8); - } + assertThat(result.getAll()).containsExactly(2, 4, 6, 8); + } - @Test - public void testNegativeInvertedStep() { - FrameRange result = new FrameRange("8-1y-2"); + @Test + public void testNegativeInvertedStep() { + FrameRange result = new FrameRange("8-1y-2"); - assertThat(result.getAll()).containsExactly(7, 5, 3, 1); - } + assertThat(result.getAll()).containsExactly(7, 5, 3, 1); + } - @Test - public void testInterleave() { - FrameRange result = new FrameRange("1-10:5"); + @Test + public void testInterleave() { + FrameRange result = new FrameRange("1-10:5"); - assertThat(result.getAll()).containsExactly(1, 6, 3, 5, 7, 9, 2, 4, 8, 10); - } + assertThat(result.getAll()).containsExactly(1, 6, 3, 5, 7, 9, 2, 4, 8, 10); + } + + @Test + public void testNegativeInterleave() { + FrameRange result = new FrameRange("10-1:-5"); - @Test - public void testNegativeInterleave() { - FrameRange result = new FrameRange("10-1:-5"); + assertThat(result.getAll()).containsExactly(10, 5, 8, 6, 4, 2, 9, 7, 3, 1); + } - assertThat(result.getAll()).containsExactly(10, 5, 8, 6, 4, 2, 9, 7, 3, 1); + @Test + public void testNonNumericalInput() { + try { + new FrameRange("a"); + fail("non-numerical frame should have been rejected"); + } catch (IllegalArgumentException e) { + // pass } - @Test - public void testNonNumericalInput() { - try { - new FrameRange("a"); - fail("non-numerical frame should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } - - try { - new FrameRange("a-b"); - fail("non-numerical frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } - - try { - new FrameRange("1-5xc"); - fail("non-numerical step size should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } - - try { - new 
FrameRange("1-5:c"); - fail("non-numerical interleave size should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } + try { + new FrameRange("a-b"); + fail("non-numerical frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass } - @Test - public void testInvalidRange() { - try { - new FrameRange("1-10-20"); - fail("invalid frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } - - try { - new FrameRange("1x10-20"); - fail("invalid frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } - - try { - new FrameRange("1:10-20"); - fail("invalid frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } + try { + new FrameRange("1-5xc"); + fail("non-numerical step size should have been rejected"); + } catch (IllegalArgumentException e) { + // pass } - @Test - public void testSize() { - FrameRange result = new FrameRange("1-7"); + try { + new FrameRange("1-5:c"); + fail("non-numerical interleave size should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + } - assertEquals(7, result.size()); + @Test + public void testInvalidRange() { + try { + new FrameRange("1-10-20"); + fail("invalid frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass } - @Test - public void testGet() { - FrameRange result = new FrameRange("1-7"); + try { + new FrameRange("1x10-20"); + fail("invalid frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } - assertEquals(5, result.get(4)); + try { + new FrameRange("1:10-20"); + fail("invalid frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass } + } - @Test - public void testIndex() { - FrameRange result = new FrameRange("1-7"); + @Test + public void testSize() { + FrameRange result = new FrameRange("1-7"); - assertEquals(5, result.index(6)); - assertEquals(-1, result.index(22)); - } + assertEquals(7, result.size()); + } + + @Test + public void testGet() { + FrameRange result = new FrameRange("1-7"); + + assertEquals(5, result.get(4)); + } + + @Test + public void testIndex() { + FrameRange result = new FrameRange("1-7"); + + assertEquals(5, result.index(6)); + assertEquals(-1, result.index(22)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java index a138f700d..3c194a445 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java @@ -7,97 +7,100 @@ import static org.junit.Assert.assertEquals; public class FrameSetTests { - @Test - public void shouldSplitListAndMaintainOrder() { - FrameSet result = new FrameSet("57,1-3,4-2,12-15x2,76-70x-3,5-12y3,1-7:5"); + @Test + public void shouldSplitListAndMaintainOrder() { + FrameSet result = new FrameSet("57,1-3,4-2,12-15x2,76-70x-3,5-12y3,1-7:5"); - assertThat(result.getAll()).containsExactly( - 57, 1, 2, 3, 4, 3, 2, 12, 14, 76, 73, 70, 6, 7, 9, 10, 12, 1, 6, 3, 5, 7, 2, 4); - } + assertThat(result.getAll()).containsExactly(57, 1, 2, 3, 4, 3, 2, 12, 14, 76, 73, 70, 6, 7, 9, + 10, 12, 1, 6, 3, 5, 7, 2, 4); + } - @Test - public void shouldReturnCorrectSize() { - FrameSet result = new FrameSet("1-7"); + @Test + public void shouldReturnCorrectSize() { + FrameSet result = new FrameSet("1-7"); - assertEquals(7, 
result.size()); - } + assertEquals(7, result.size()); + } - @Test - public void shouldReturnSingleFrame() { - FrameSet result = new FrameSet("1-7"); + @Test + public void shouldReturnSingleFrame() { + FrameSet result = new FrameSet("1-7"); - assertEquals(5, result.get(4)); - } + assertEquals(5, result.get(4)); + } - @Test - public void shouldReturnCorrectIndexes() { - FrameSet result = new FrameSet("1-7"); - - assertEquals(5, result.index(6)); - assertEquals(-1, result.index(22)); - } + @Test + public void shouldReturnCorrectIndexes() { + FrameSet result = new FrameSet("1-7"); - @Test - public void shouldReconstructSteppedRange() { - FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); + assertEquals(5, result.index(6)); + assertEquals(-1, result.index(22)); + } - // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, 108}; + @Test + public void shouldReconstructSteppedRange() { + FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); - assertEquals("11-91x20", result.getChunk(5, 5)); - } + // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, + // 108}; - @Test - public void shouldCreateNewSteppedRangeAndNextFrame() { - FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); + assertEquals("11-91x20", result.getChunk(5, 5)); + } - // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, 108}; + @Test + public void shouldCreateNewSteppedRangeAndNextFrame() { + FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); - assertEquals("5-11x2,31", result.getChunk(2, 5)); - } + // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, + // 108}; - @Test - public void shouldReturnCommaSeparatedList() { - FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); + assertEquals("5-11x2,31", result.getChunk(2, 5)); + } - // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, 108}; + @Test + public void shouldReturnCommaSeparatedList() { + FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); - assertEquals("91,103,104", result.getChunk(9, 3)); - } + // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, + // 108}; - @Test - public void shouldReturnSubsetOfSteppedRange() { - FrameSet result = new FrameSet("1-100x3"); + assertEquals("91,103,104", result.getChunk(9, 3)); + } - assertEquals("28-34x3", result.getChunk(9, 3)); - } + @Test + public void shouldReturnSubsetOfSteppedRange() { + FrameSet result = new FrameSet("1-100x3"); - @Test - public void shouldReturnSubsetOfRange() { - FrameSet result = new FrameSet("1-100"); + assertEquals("28-34x3", result.getChunk(9, 3)); + } - assertEquals("10-12", result.getChunk(9, 3)); - } + @Test + public void shouldReturnSubsetOfRange() { + FrameSet result = new FrameSet("1-100"); - @Test - public void shouldStopBeforeTheEndOfTheRange() { - FrameSet result = new FrameSet("55-60"); + assertEquals("10-12", result.getChunk(9, 3)); + } - assertEquals("55-60", result.getChunk(0, 10)); - } + @Test + public void shouldStopBeforeTheEndOfTheRange() { + FrameSet result = new FrameSet("55-60"); - @Test - public void shouldReturnLastFrame() { - FrameSet result1 = new FrameSet("1-10x2"); + assertEquals("55-60", result.getChunk(0, 10)); + } - FrameSet chunk1 = new FrameSet(result1.getChunk(0, 3)); - FrameSet chunk2 = new FrameSet(result1.getChunk(3, 3)); + @Test + public void shouldReturnLastFrame() { + FrameSet result1 = new FrameSet("1-10x2"); - assertEquals(5, chunk1.get(chunk1.size()-1)); - 
assertEquals(9, chunk2.get(chunk2.size()-1)); + FrameSet chunk1 = new FrameSet(result1.getChunk(0, 3)); + FrameSet chunk2 = new FrameSet(result1.getChunk(3, 3)); - FrameSet result2 = new FrameSet("1"); - FrameSet chunk3 = new FrameSet(result2.getChunk(0, 3)); + assertEquals(5, chunk1.get(chunk1.size() - 1)); + assertEquals(9, chunk2.get(chunk2.size() - 1)); - assertEquals(1, chunk3.get(chunk3.size()-1)); - } + FrameSet result2 = new FrameSet("1"); + FrameSet chunk3 = new FrameSet(result2.getChunk(0, 3)); + + assertEquals(1, chunk3.get(chunk3.size() - 1)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java index 417924b9b..158e48b50 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java @@ -1,20 +1,17 @@ /* * Copyright Contributors to the OpenCue Project * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at + * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except + * in compliance with the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. */ - package com.imageworks.spcue.test.util; import com.imageworks.spcue.JobDetail; @@ -33,58 +30,60 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobLogUtilTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - private JobLogUtil jobLogUtil; + @Resource + private JobLogUtil jobLogUtil; - private String logRootDefault; - private String logRootSomeOs; + private String logRootDefault; + private String logRootSomeOs; - @Before - public void setUp() { - // The values should match what's defined in test/resources/opencue.properties. - logRootDefault = "/arbitraryLogDirectory"; - logRootSomeOs = "/arbitrarySomeOsLogDirectory"; - } + @Before + public void setUp() { + // The values should match what's defined in test/resources/opencue.properties. 
+ logRootDefault = "/arbitraryLogDirectory"; + logRootSomeOs = "/arbitrarySomeOsLogDirectory"; + } - @Test - public void testGetJobLogRootDirDefault() { - assertEquals(logRootDefault, jobLogUtil.getJobLogRootDir("someUndefinedOs")); - } + @Test + public void testGetJobLogRootDirDefault() { + assertEquals(logRootDefault, jobLogUtil.getJobLogRootDir("someUndefinedOs")); + } - @Test - public void testGetJobLogRootSomeOs() { - assertEquals(logRootSomeOs, jobLogUtil.getJobLogRootDir("some_os")); - } + @Test + public void testGetJobLogRootSomeOs() { + assertEquals(logRootSomeOs, jobLogUtil.getJobLogRootDir("some_os")); + } - @Test - public void testGetJobLogDirDefault() { - assertEquals(logRootDefault + "/show/shot/logs", jobLogUtil.getJobLogDir("show", "shot", "someUndefinedOs")); - } + @Test + public void testGetJobLogDirDefault() { + assertEquals(logRootDefault + "/show/shot/logs", + jobLogUtil.getJobLogDir("show", "shot", "someUndefinedOs")); + } - @Test - public void testGetJobLogDirSomeOs() { - assertEquals(logRootSomeOs + "/show/shot/logs", jobLogUtil.getJobLogDir("show", "shot", "some_os")); - } + @Test + public void testGetJobLogDirSomeOs() { + assertEquals(logRootSomeOs + "/show/shot/logs", + jobLogUtil.getJobLogDir("show", "shot", "some_os")); + } - @Test - public void testGetJobLogPathDefault() { - JobDetail jobDetail = new JobDetail(); - jobDetail.id = "id"; - jobDetail.name = "name"; - jobDetail.showName = "show"; - jobDetail.shot = "shot"; - jobDetail.os = "someUndefinedOs"; - assertEquals(logRootDefault + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); - } + @Test + public void testGetJobLogPathDefault() { + JobDetail jobDetail = new JobDetail(); + jobDetail.id = "id"; + jobDetail.name = "name"; + jobDetail.showName = "show"; + jobDetail.shot = "shot"; + jobDetail.os = "someUndefinedOs"; + assertEquals(logRootDefault + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); + } - @Test - public void testGetJobLogPathSomeOs() { - JobDetail jobDetail = new JobDetail(); - jobDetail.id = "id"; - jobDetail.name = "name"; - jobDetail.showName = "show"; - jobDetail.shot = "shot"; - jobDetail.os = "some_os"; - assertEquals(logRootSomeOs + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); - } + @Test + public void testGetJobLogPathSomeOs() { + JobDetail jobDetail = new JobDetail(); + jobDetail.id = "id"; + jobDetail.name = "name"; + jobDetail.showName = "show"; + jobDetail.shot = "shot"; + jobDetail.os = "some_os"; + assertEquals(logRootSomeOs + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java index e6711bcf5..a219b75e1 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java @@ -7,12 +7,12 @@ public class SqlUtilTests { - @Test - public void testBuildBindVariableArray() { - String colName = "arbitrary-column-name"; + @Test + public void testBuildBindVariableArray() { + String colName = "arbitrary-column-name"; - String queryString = buildBindVariableArray(colName, 6); + String queryString = buildBindVariableArray(colName, 6); - assertEquals(colName + " IN (?,?,?,?,?,?)", queryString); - } + assertEquals(colName + " IN (?,?,?,?,?,?)", queryString); + } } From e1a9a8d6047fd39ceb734890acabc5db373b8f24 Mon Sep 17 00:00:00 2001 From: Diego Tavares Date: Mon, 6 Jan 2025 
16:39:29 -0800 Subject: [PATCH 2/3] Change tabulation to 4 spaces --- cuebot/build.gradle | 1 - cuebot/jdtls.xml | 2 +- .../spring/remoting/CueServerInterceptor.java | 53 +- .../common/spring/remoting/GrpcServer.java | 122 +- .../com/imageworks/spcue/ActionEntity.java | 122 +- .../com/imageworks/spcue/ActionInterface.java | 2 +- .../imageworks/spcue/AllocationEntity.java | 18 +- .../imageworks/spcue/AllocationInterface.java | 2 +- .../imageworks/spcue/BuildableDependency.java | 190 +- .../com/imageworks/spcue/BuildableJob.java | 144 +- .../com/imageworks/spcue/BuildableLayer.java | 40 +- .../com/imageworks/spcue/CommentDetail.java | 8 +- .../imageworks/spcue/CueGrpcException.java | 14 +- .../imageworks/spcue/CuebotApplication.java | 55 +- .../java/com/imageworks/spcue/DeedEntity.java | 12 +- .../imageworks/spcue/DepartmentEntity.java | 6 +- .../imageworks/spcue/DepartmentInterface.java | 2 +- .../spcue/DependencyManagerException.java | 16 +- .../com/imageworks/spcue/DispatchFrame.java | 92 +- .../com/imageworks/spcue/DispatchHost.java | 306 +- .../com/imageworks/spcue/DispatchJob.java | 12 +- .../java/com/imageworks/spcue/Entity.java | 72 +- .../imageworks/spcue/EntityCreationError.java | 46 +- .../com/imageworks/spcue/EntityException.java | 52 +- .../com/imageworks/spcue/EntityInterface.java | 4 +- .../spcue/EntityModificationError.java | 46 +- .../spcue/EntityNotFoundException.java | 14 +- .../imageworks/spcue/EntityRemovalError.java | 46 +- .../spcue/EntityRetrievalException.java | 36 +- .../imageworks/spcue/ExecutionSummary.java | 144 +- .../com/imageworks/spcue/FacilityEntity.java | 14 +- .../imageworks/spcue/FacilityInterface.java | 2 +- .../com/imageworks/spcue/FilterEntity.java | 40 +- .../com/imageworks/spcue/FilterInterface.java | 2 +- .../com/imageworks/spcue/FrameDetail.java | 24 +- .../com/imageworks/spcue/FrameEntity.java | 30 +- .../com/imageworks/spcue/FrameInterface.java | 16 +- .../imageworks/spcue/FrameStateTotals.java | 144 +- .../com/imageworks/spcue/GroupDetail.java | 58 +- .../com/imageworks/spcue/GroupEntity.java | 18 +- .../com/imageworks/spcue/GroupInterface.java | 2 +- .../spcue/HistoricalJobTransferException.java | 36 +- .../HostConfigurationErrorException.java | 38 +- .../java/com/imageworks/spcue/HostEntity.java | 86 +- .../com/imageworks/spcue/HostInterface.java | 2 +- .../java/com/imageworks/spcue/Inherit.java | 2 +- .../java/com/imageworks/spcue/JobDetail.java | 74 +- .../spcue/JobDispatchException.java | 28 +- .../java/com/imageworks/spcue/JobEntity.java | 30 +- .../com/imageworks/spcue/JobInterface.java | 2 +- .../imageworks/spcue/JobLaunchException.java | 36 +- .../com/imageworks/spcue/LayerDetail.java | 272 +- .../com/imageworks/spcue/LayerEntity.java | 42 +- .../com/imageworks/spcue/LayerInterface.java | 2 +- .../java/com/imageworks/spcue/LayerStats.java | 273 +- .../spcue/LightweightDependency.java | 34 +- .../com/imageworks/spcue/LimitEntity.java | 24 +- .../com/imageworks/spcue/LimitInterface.java | 2 +- .../imageworks/spcue/LocalHostAssignment.java | 380 +- .../com/imageworks/spcue/MaintenanceTask.java | 48 +- .../com/imageworks/spcue/MatcherEntity.java | 54 +- .../imageworks/spcue/MatcherInterface.java | 2 +- .../com/imageworks/spcue/MinimalHost.java | 40 +- .../com/imageworks/spcue/OwnerEntity.java | 8 +- .../com/imageworks/spcue/PointDetail.java | 42 +- .../com/imageworks/spcue/PointInterface.java | 2 +- .../com/imageworks/spcue/ProcInterface.java | 2 +- .../spcue/PrometheusMetricsCollector.java | 505 +-- 
.../java/com/imageworks/spcue/Redirect.java | 154 +- .../com/imageworks/spcue/ResourceUsage.java | 50 +- .../com/imageworks/spcue/ServiceEntity.java | 90 +- .../spcue/ServiceOverrideEntity.java | 8 +- .../java/com/imageworks/spcue/ShowEntity.java | 20 +- .../com/imageworks/spcue/ShowInterface.java | 2 +- .../com/imageworks/spcue/SortableShow.java | 152 +- .../java/com/imageworks/spcue/Source.java | 56 +- .../spcue/SpcueRuntimeException.java | 38 +- .../spcue/SpecBuilderException.java | 16 +- .../com/imageworks/spcue/StrandedCores.java | 30 +- .../imageworks/spcue/SubscriptionEntity.java | 36 +- .../spcue/SubscriptionInterface.java | 2 +- .../java/com/imageworks/spcue/TaskEntity.java | 80 +- .../com/imageworks/spcue/TaskInterface.java | 12 +- .../com/imageworks/spcue/ThreadStats.java | 28 +- .../imageworks/spcue/TrackitTaskDetail.java | 20 +- .../com/imageworks/spcue/VirtualProc.java | 456 +-- .../imageworks/spcue/config/AppConfig.java | 92 +- .../spcue/config/DatabaseEngine.java | 8 +- .../config/PostgresDatabaseCondition.java | 16 +- .../com/imageworks/spcue/dao/ActionDao.java | 12 +- .../imageworks/spcue/dao/AllocationDao.java | 136 +- .../com/imageworks/spcue/dao/BookingDao.java | 358 +- .../com/imageworks/spcue/dao/CommentDao.java | 133 +- .../com/imageworks/spcue/dao/DeedDao.java | 76 +- .../imageworks/spcue/dao/DepartmentDao.java | 80 +- .../com/imageworks/spcue/dao/DependDao.java | 335 +- .../imageworks/spcue/dao/DispatcherDao.java | 301 +- .../com/imageworks/spcue/dao/FacilityDao.java | 84 +- .../com/imageworks/spcue/dao/FilterDao.java | 28 +- .../com/imageworks/spcue/dao/FrameDao.java | 707 ++-- .../com/imageworks/spcue/dao/GroupDao.java | 402 +-- .../imageworks/spcue/dao/HistoricalDao.java | 28 +- .../com/imageworks/spcue/dao/HostDao.java | 567 +-- .../java/com/imageworks/spcue/dao/JobDao.java | 878 ++--- .../com/imageworks/spcue/dao/LayerDao.java | 825 ++--- .../com/imageworks/spcue/dao/LimitDao.java | 88 +- .../imageworks/spcue/dao/MaintenanceDao.java | 64 +- .../com/imageworks/spcue/dao/MatcherDao.java | 12 +- .../spcue/dao/NestedWhiteboardDao.java | 26 +- .../com/imageworks/spcue/dao/OwnerDao.java | 90 +- .../com/imageworks/spcue/dao/PointDao.java | 194 +- .../com/imageworks/spcue/dao/ProcDao.java | 532 +-- .../com/imageworks/spcue/dao/RedirectDao.java | 76 +- .../com/imageworks/spcue/dao/ServiceDao.java | 20 +- .../com/imageworks/spcue/dao/ShowDao.java | 240 +- .../imageworks/spcue/dao/SubscriptionDao.java | 162 +- .../com/imageworks/spcue/dao/TaskDao.java | 196 +- .../com/imageworks/spcue/dao/TrackitDao.java | 18 +- .../imageworks/spcue/dao/WhiteboardDao.java | 1212 +++---- .../spcue/dao/criteria/CriteriaException.java | 36 +- .../spcue/dao/criteria/CriteriaInterface.java | 16 +- .../spcue/dao/criteria/Direction.java | 2 +- .../dao/criteria/FrameSearchFactory.java | 84 +- .../dao/criteria/FrameSearchInterface.java | 38 +- .../spcue/dao/criteria/HostSearchFactory.java | 30 +- .../dao/criteria/HostSearchInterface.java | 10 +- .../spcue/dao/criteria/JobSearchFactory.java | 50 +- .../dao/criteria/JobSearchInterface.java | 12 +- .../imageworks/spcue/dao/criteria/Phrase.java | 42 +- .../spcue/dao/criteria/ProcSearchFactory.java | 52 +- .../dao/criteria/ProcSearchInterface.java | 28 +- .../imageworks/spcue/dao/criteria/Sort.java | 36 +- .../spcue/dao/criteria/postgres/Criteria.java | 505 +-- .../dao/criteria/postgres/FrameSearch.java | 345 +- .../dao/criteria/postgres/HostSearch.java | 52 +- .../dao/criteria/postgres/JobSearch.java | 66 +- .../dao/criteria/postgres/ProcSearch.java 
| 164 +- .../spcue/dao/postgres/ActionDaoJdbc.java | 185 +- .../spcue/dao/postgres/AllocationDaoJdbc.java | 246 +- .../spcue/dao/postgres/BookingDaoJdbc.java | 545 +-- .../spcue/dao/postgres/CommentDaoJdbc.java | 178 +- .../spcue/dao/postgres/DeedDaoJdbc.java | 107 +- .../spcue/dao/postgres/DepartmentDaoJdbc.java | 76 +- .../spcue/dao/postgres/DependDaoJdbc.java | 718 ++-- .../spcue/dao/postgres/DispatcherDaoJdbc.java | 930 ++--- .../spcue/dao/postgres/FacilityDaoJdbc.java | 95 +- .../spcue/dao/postgres/FilterDaoJdbc.java | 215 +- .../spcue/dao/postgres/FrameDaoJdbc.java | 1508 ++++---- .../spcue/dao/postgres/GroupDaoJdbc.java | 655 ++-- .../spcue/dao/postgres/HistoricalDaoJdbc.java | 30 +- .../spcue/dao/postgres/HostDaoJdbc.java | 1016 +++--- .../spcue/dao/postgres/JobDaoJdbc.java | 1516 ++++---- .../spcue/dao/postgres/LayerDaoJdbc.java | 1217 ++++--- .../spcue/dao/postgres/LimitDaoJdbc.java | 106 +- .../dao/postgres/MaintenanceDaoJdbc.java | 63 +- .../spcue/dao/postgres/MatcherDaoJdbc.java | 106 +- .../dao/postgres/NestedWhiteboardDaoJdbc.java | 672 ++-- .../spcue/dao/postgres/OwnerDaoJdbc.java | 120 +- .../spcue/dao/postgres/PointDaoJdbc.java | 234 +- .../spcue/dao/postgres/ProcDaoJdbc.java | 1163 +++--- .../spcue/dao/postgres/RedirectDaoJdbc.java | 99 +- .../spcue/dao/postgres/ServiceDaoJdbc.java | 332 +- .../spcue/dao/postgres/ShowDaoJdbc.java | 342 +- .../dao/postgres/SubscriptionDaoJdbc.java | 235 +- .../spcue/dao/postgres/TaskDaoJdbc.java | 283 +- .../spcue/dao/postgres/WhiteboardDaoJdbc.java | 3197 +++++++++-------- .../spcue/depend/AbstractDepend.java | 82 +- .../com/imageworks/spcue/depend/Depend.java | 2 +- .../spcue/depend/DependCreationVisitor.java | 130 +- .../spcue/depend/DependException.java | 36 +- .../spcue/depend/DependVisitor.java | 24 +- .../imageworks/spcue/depend/FrameByFrame.java | 76 +- .../imageworks/spcue/depend/FrameOnFrame.java | 94 +- .../imageworks/spcue/depend/FrameOnJob.java | 62 +- .../imageworks/spcue/depend/FrameOnLayer.java | 70 +- .../imageworks/spcue/depend/JobOnFrame.java | 62 +- .../com/imageworks/spcue/depend/JobOnJob.java | 62 +- .../imageworks/spcue/depend/JobOnLayer.java | 62 +- .../imageworks/spcue/depend/LayerOnFrame.java | 68 +- .../imageworks/spcue/depend/LayerOnJob.java | 62 +- .../imageworks/spcue/depend/LayerOnLayer.java | 74 +- .../spcue/depend/LayerOnSimFrame.java | 76 +- .../spcue/depend/PreviousFrame.java | 68 +- .../spcue/depend/QueueDependOperation.java | 20 +- .../spcue/dispatcher/AbstractDispatcher.java | 351 +- .../spcue/dispatcher/BookingQueue.java | 141 +- .../spcue/dispatcher/CoreUnitDispatcher.java | 734 ++-- .../spcue/dispatcher/DispatchQueue.java | 121 +- .../DispatchQueueTaskRejectionException.java | 36 +- .../spcue/dispatcher/DispatchSupport.java | 948 ++--- .../dispatcher/DispatchSupportService.java | 1137 +++--- .../spcue/dispatcher/Dispatcher.java | 364 +- .../spcue/dispatcher/DispatcherException.java | 16 +- .../dispatcher/FrameCompleteHandler.java | 1217 +++---- .../dispatcher/FrameLookupException.java | 36 +- .../dispatcher/FrameReservationException.java | 36 +- .../spcue/dispatcher/HealthyThreadPool.java | 362 +- .../spcue/dispatcher/HostReportHandler.java | 1840 +++++----- .../spcue/dispatcher/HostReportQueue.java | 186 +- .../spcue/dispatcher/JobLookupException.java | 36 +- .../spcue/dispatcher/LocalDispatcher.java | 623 ++-- .../spcue/dispatcher/QueueHealthCheck.java | 4 +- .../spcue/dispatcher/QueueRejectCounter.java | 22 +- .../spcue/dispatcher/RedirectManager.java | 661 ++-- 
.../spcue/dispatcher/ResourceContainer.java | 40 +- .../ResourceCreationFailureException.java | 36 +- .../ResourceDuplicationFailureException.java | 36 +- .../ResourceReleaseFailureException.java | 36 +- .../ResourceReservationFailureException.java | 36 +- .../dispatcher/RqdRetryReportException.java | 36 +- .../dispatcher/commands/DispatchBookHost.java | 171 +- .../commands/DispatchBookHostLocal.java | 36 +- .../commands/DispatchCommandTemplate.java | 17 +- .../commands/DispatchDropDepends.java | 91 +- .../commands/DispatchEatFrames.java | 34 +- .../commands/DispatchHandleHostReport.java | 62 +- .../commands/DispatchJobComplete.java | 38 +- .../commands/DispatchKillFrames.java | 34 +- .../commands/DispatchKillProcs.java | 40 +- .../commands/DispatchLaunchJob.java | 28 +- .../dispatcher/commands/DispatchMoveJobs.java | 44 +- .../commands/DispatchNextFrame.java | 32 +- .../commands/DispatchReorderFrames.java | 73 +- .../commands/DispatchRetryFrames.java | 34 +- .../commands/DispatchRqdKillFrame.java | 56 +- .../commands/DispatchRqdKillFrameMemory.java | 66 +- .../commands/DispatchSatisfyDepends.java | 81 +- .../DispatchShutdownJobIfCompleted.java | 32 +- .../commands/DispatchStaggerFrames.java | 72 +- .../dispatcher/commands/KeyRunnable.java | 20 +- .../commands/ManageReparentHosts.java | 38 +- .../com/imageworks/spcue/rqd/RqdClient.java | 130 +- .../spcue/rqd/RqdClientException.java | 36 +- .../imageworks/spcue/rqd/RqdClientGrpc.java | 279 +- .../imageworks/spcue/servant/CueStatic.java | 236 +- .../spcue/servant/ManageAction.java | 99 +- .../spcue/servant/ManageAllocation.java | 430 +-- .../spcue/servant/ManageComment.java | 60 +- .../imageworks/spcue/servant/ManageDeed.java | 86 +- .../spcue/servant/ManageDepartment.java | 326 +- .../spcue/servant/ManageDepend.java | 130 +- .../spcue/servant/ManageFacility.java | 104 +- .../spcue/servant/ManageFilter.java | 449 +-- .../imageworks/spcue/servant/ManageFrame.java | 642 ++-- .../imageworks/spcue/servant/ManageGroup.java | 541 +-- .../imageworks/spcue/servant/ManageHost.java | 599 +-- .../imageworks/spcue/servant/ManageJob.java | 1615 ++++----- .../imageworks/spcue/servant/ManageLayer.java | 954 ++--- .../imageworks/spcue/servant/ManageLimit.java | 132 +- .../spcue/servant/ManageMatcher.java | 78 +- .../imageworks/spcue/servant/ManageOwner.java | 200 +- .../imageworks/spcue/servant/ManageProc.java | 467 +-- .../spcue/servant/ManageRenderPartition.java | 56 +- .../spcue/servant/ManageService.java | 188 +- .../spcue/servant/ManageServiceOverride.java | 88 +- .../imageworks/spcue/servant/ManageShow.java | 769 ++-- .../spcue/servant/ManageSubscription.java | 163 +- .../imageworks/spcue/servant/ManageTask.java | 70 +- .../spcue/servant/RqdReportStatic.java | 88 +- .../imageworks/spcue/servant/ServantUtil.java | 84 +- .../spcue/service/AdminManager.java | 114 +- .../spcue/service/AdminManagerService.java | 574 +-- .../spcue/service/BookingManager.java | 222 +- .../spcue/service/BookingManagerService.java | 350 +- .../spcue/service/CommentManager.java | 113 +- .../spcue/service/CommentManagerService.java | 142 +- .../spcue/service/DepartmentManager.java | 344 +- .../service/DepartmentManagerService.java | 442 +-- .../spcue/service/DependManager.java | 321 +- .../spcue/service/DependManagerService.java | 1073 +++--- .../spcue/service/EmailSupport.java | 496 +-- .../spcue/service/FilterManager.java | 54 +- .../spcue/service/FilterManagerService.java | 842 ++--- .../spcue/service/GroupManager.java | 176 +- .../spcue/service/GroupManagerService.java | 348 
+- .../spcue/service/HistoricalManager.java | 26 +- .../service/HistoricalManagerService.java | 50 +- .../spcue/service/HistoricalSupport.java | 42 +- .../imageworks/spcue/service/HostManager.java | 414 +-- .../spcue/service/HostManagerService.java | 699 ++-- .../imageworks/spcue/service/JmsMover.java | 100 +- .../imageworks/spcue/service/JobLauncher.java | 361 +- .../imageworks/spcue/service/JobManager.java | 896 ++--- .../spcue/service/JobManagerService.java | 1164 +++--- .../spcue/service/JobManagerSupport.java | 1021 +++--- .../com/imageworks/spcue/service/JobSpec.java | 1623 ++++----- .../spcue/service/LocalBookingSupport.java | 235 +- .../service/MaintenanceManagerSupport.java | 421 +-- .../spcue/service/OwnerManager.java | 128 +- .../spcue/service/OwnerManagerService.java | 168 +- .../spcue/service/RedirectService.java | 178 +- .../spcue/service/ServiceManager.java | 20 +- .../spcue/service/ServiceManagerService.java | 136 +- .../imageworks/spcue/service/Whiteboard.java | 14 +- .../spcue/service/WhiteboardService.java | 736 ++-- .../spcue/servlet/HealthCheckServlet.java | 168 +- .../spcue/servlet/JobLaunchServlet.java | 60 +- .../com/imageworks/spcue/util/Convert.java | 62 +- .../spcue/util/CueExceptionUtil.java | 52 +- .../com/imageworks/spcue/util/CueUtil.java | 599 +-- .../com/imageworks/spcue/util/FrameRange.java | 298 +- .../com/imageworks/spcue/util/FrameSet.java | 299 +- .../com/imageworks/spcue/util/JobLogUtil.java | 67 +- .../com/imageworks/spcue/util/SqlUtil.java | 206 +- .../com/imageworks/spcue/util/TagUtil.java | 38 +- .../spcue/config/TestAppConfig.java | 24 +- .../spcue/test/AssumingPostgresEngine.java | 52 +- .../imageworks/spcue/test/EntityTests.java | 66 +- .../spcue/test/TestDatabaseSetupPostgres.java | 70 +- .../spcue/test/TransactionalTest.java | 6 +- .../test/dao/criteria/FrameSearchTests.java | 366 +- .../test/dao/criteria/HostSearchTests.java | 112 +- .../test/dao/criteria/JobSearchTests.java | 96 +- .../test/dao/criteria/ProcSearchTests.java | 309 +- .../test/dao/postgres/ActionDaoTests.java | 298 +- .../test/dao/postgres/AllocationDaoTests.java | 290 +- .../test/dao/postgres/BookingDaoTests.java | 538 +-- .../test/dao/postgres/CommentDaoTests.java | 252 +- .../spcue/test/dao/postgres/DeedDaoTests.java | 145 +- .../test/dao/postgres/DepartmentDaoTests.java | 127 +- .../test/dao/postgres/DependDaoTests.java | 730 ++-- .../dao/postgres/DispatcherDaoFifoTests.java | 302 +- .../test/dao/postgres/DispatcherDaoTests.java | 857 ++--- .../test/dao/postgres/FacilityDaoTests.java | 57 +- .../test/dao/postgres/FilterDaoTests.java | 454 +-- .../test/dao/postgres/FrameDaoTests.java | 1148 +++--- .../test/dao/postgres/GroupDaoTests.java | 794 ++-- .../test/dao/postgres/HistoricalDaoTests.java | 52 +- .../spcue/test/dao/postgres/HostDaoTests.java | 969 ++--- .../spcue/test/dao/postgres/JobDaoTests.java | 1275 +++---- .../test/dao/postgres/LayerDaoTests.java | 1332 +++---- .../test/dao/postgres/LimitDaoTests.java | 184 +- .../dao/postgres/MaintenanceDaoTests.java | 50 +- .../test/dao/postgres/MatcherDaoTests.java | 174 +- .../postgres/NestedWhiteboardDaoTests.java | 48 +- .../test/dao/postgres/OwnerDaoTests.java | 160 +- .../test/dao/postgres/PointDaoTests.java | 212 +- .../spcue/test/dao/postgres/ProcDaoTests.java | 1457 ++++---- .../test/dao/postgres/ServiceDaoTests.java | 380 +- .../spcue/test/dao/postgres/ShowDaoTests.java | 331 +- .../dao/postgres/SubscriptionDaoTests.java | 368 +- .../spcue/test/dao/postgres/TaskDaoTests.java | 405 ++- 
.../test/dao/postgres/WhiteboardDaoTests.java | 2382 ++++++------ .../CoreUnitDispatcherGpuJobTests.java | 276 +- .../CoreUnitDispatcherGpuTests.java | 325 +- .../CoreUnitDispatcherGpusJobTests.java | 386 +- .../dispatcher/CoreUnitDispatcherTests.java | 287 +- .../test/dispatcher/DispatchSupportTests.java | 148 +- .../dispatcher/FrameCompleteHandlerTests.java | 702 ++-- .../test/dispatcher/HistoryControlTests.java | 231 +- .../dispatcher/HostReportHandlerGpuTests.java | 110 +- .../dispatcher/HostReportHandlerTests.java | 922 ++--- .../test/dispatcher/LocalDispatcherTests.java | 567 +-- .../test/dispatcher/RedirectManagerTests.java | 509 +-- .../test/dispatcher/StrandedCoreTests.java | 117 +- .../test/dispatcher/TestBookingQueue.java | 113 +- .../test/servant/FakeStreamObserver.java | 12 +- .../test/servant/ManageAllocationTests.java | 130 +- .../spcue/test/servant/ManageFrameTests.java | 146 +- .../spcue/test/service/AdminManagerTests.java | 276 +- .../test/service/BookingManagerTests.java | 627 ++-- .../test/service/CommentManagerTests.java | 50 +- .../test/service/DepartmentManagerTests.java | 94 +- .../service/DependManagerChunkingTests.java | 310 +- .../test/service/DependManagerTests.java | 838 ++--- .../spcue/test/service/EmailSupportTests.java | 102 +- .../test/service/FilterManagerTests.java | 610 ++-- .../spcue/test/service/GroupManagerTests.java | 124 +- .../spcue/test/service/HostManagerTests.java | 181 +- .../spcue/test/service/JobManagerTests.java | 776 ++-- .../spcue/test/service/JobSpecTests.java | 145 +- .../MaintenanceManagerSupportTests.java | 24 +- .../spcue/test/service/OwnerManagerTests.java | 231 +- .../test/service/ServiceManagerTests.java | 256 +- .../spcue/test/service/WhiteboardTests.java | 42 +- .../spcue/test/util/CoreSaturationTests.java | 48 +- .../spcue/test/util/CoreSpanTests.java | 240 +- .../spcue/test/util/CueUtilTester.java | 209 +- .../spcue/test/util/FrameRangeTests.java | 278 +- .../spcue/test/util/FrameSetTests.java | 138 +- .../spcue/test/util/JobLogUtilTests.java | 98 +- .../spcue/test/util/SqlUtilTests.java | 12 +- 382 files changed, 47798 insertions(+), 47173 deletions(-) diff --git a/cuebot/build.gradle b/cuebot/build.gradle index 815a09695..61b9dbfae 100644 --- a/cuebot/build.gradle +++ b/cuebot/build.gradle @@ -178,6 +178,5 @@ spotless { targetExclude 'src/compiled_protobuf/**' toggleOffOn() eclipse().configFile('jdtls.xml') - indentWithSpaces(4) } } diff --git a/cuebot/jdtls.xml b/cuebot/jdtls.xml index 7bb6804eb..bb335f000 100644 --- a/cuebot/jdtls.xml +++ b/cuebot/jdtls.xml @@ -167,7 +167,7 @@ - + diff --git a/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java b/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java index 9939785b5..0949e4bc1 100644 --- a/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java +++ b/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java @@ -12,34 +12,33 @@ public class CueServerInterceptor implements ServerInterceptor { - private static final Logger logger = LogManager.getLogger(CueServerInterceptor.class); - private static final Logger accessLogger = LogManager.getLogger("API"); + private static final Logger logger = LogManager.getLogger(CueServerInterceptor.class); + private static final Logger accessLogger = LogManager.getLogger("API"); - @Override - public ServerCall.Listener interceptCall(ServerCall serverCall, - Metadata metadata, ServerCallHandler serverCallHandler) { - 
accessLogger.info("gRPC [" + serverCall.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR) - + "]: " + serverCall.getMethodDescriptor().getFullMethodName()); + @Override + public ServerCall.Listener interceptCall(ServerCall serverCall, + Metadata metadata, ServerCallHandler serverCallHandler) { + accessLogger.info("gRPC [" + serverCall.getAttributes().get(Grpc.TRANSPORT_ATTR_REMOTE_ADDR) + + "]: " + serverCall.getMethodDescriptor().getFullMethodName()); - ServerCall.Listener delegate = serverCallHandler.startCall(serverCall, metadata); - return new SimpleForwardingServerCallListener(delegate) { - @Override - public void onHalfClose() { - try { - super.onHalfClose(); - } catch (Exception e) { - logger.error("Caught an unexpected error.", e); - serverCall.close( - Status.INTERNAL.withCause(e).withDescription(e.toString() + "\n" + e.getMessage()), - new Metadata()); - } - } + ServerCall.Listener delegate = serverCallHandler.startCall(serverCall, metadata); + return new SimpleForwardingServerCallListener(delegate) { + @Override + public void onHalfClose() { + try { + super.onHalfClose(); + } catch (Exception e) { + logger.error("Caught an unexpected error.", e); + serverCall.close(Status.INTERNAL.withCause(e) + .withDescription(e.toString() + "\n" + e.getMessage()), new Metadata()); + } + } - @Override - public void onMessage(ReqT request) { - accessLogger.info("Request Data: " + request); - super.onMessage(request); - } - }; - } + @Override + public void onMessage(ReqT request) { + accessLogger.info("Request Data: " + request); + super.onMessage(request); + } + }; + } } diff --git a/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java b/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java index 66c49de47..cd06daa05 100644 --- a/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java +++ b/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java @@ -40,72 +40,74 @@ public class GrpcServer implements ApplicationContextAware { - private static final Logger logger = LogManager.getLogger(GrpcServer.class); + private static final Logger logger = LogManager.getLogger(GrpcServer.class); - private static final String DEFAULT_NAME = "CueGrpcServer"; - private static final String DEFAULT_PORT = "8443"; - private static final int DEFAULT_MAX_MESSAGE_BYTES = 104857600; + private static final String DEFAULT_NAME = "CueGrpcServer"; + private static final String DEFAULT_PORT = "8443"; + private static final int DEFAULT_MAX_MESSAGE_BYTES = 104857600; - private String name; - private int port; - private int maxMessageBytes; - private Server server; - private ApplicationContext applicationContext; + private String name; + private int port; + private int maxMessageBytes; + private Server server; + private ApplicationContext applicationContext; - public GrpcServer() { - this(DEFAULT_NAME, DEFAULT_PORT, new Properties(), DEFAULT_MAX_MESSAGE_BYTES); - } + public GrpcServer() { + this(DEFAULT_NAME, DEFAULT_PORT, new Properties(), DEFAULT_MAX_MESSAGE_BYTES); + } - public GrpcServer(String name, String port, Properties props, Integer maxMessageBytes) { - logger.info("Setting up gRPC server..."); - this.name = name; - this.port = Integer.parseInt(port); - this.maxMessageBytes = maxMessageBytes; - } + public GrpcServer(String name, String port, Properties props, Integer maxMessageBytes) { + logger.info("Setting up gRPC server..."); + this.name = name; + this.port = Integer.parseInt(port); + this.maxMessageBytes = maxMessageBytes; + } - 
public void shutdown() { - if (!server.isShutdown()) { - logger.info("gRPC server shutting down on " + this.name + " at port " + this.port); - server.shutdown(); + public void shutdown() { + if (!server.isShutdown()) { + logger.info("gRPC server shutting down on " + this.name + " at port " + this.port); + server.shutdown(); + } } - } - public void start() throws IOException { - server = ServerBuilder.forPort(this.port) - .addService(applicationContext.getBean("rqdReportStatic", RqdReportStatic.class)) - .addService(applicationContext.getBean("cueStaticServant", CueStatic.class)) - .addService(applicationContext.getBean("manageAction", ManageAction.class)) - .addService(applicationContext.getBean("manageAllocation", ManageAllocation.class)) - .addService(applicationContext.getBean("manageComment", ManageComment.class)) - .addService(applicationContext.getBean("manageDeed", ManageDeed.class)) - .addService(applicationContext.getBean("manageDepartment", ManageDepartment.class)) - .addService(applicationContext.getBean("manageDepend", ManageDepend.class)) - .addService(applicationContext.getBean("manageFacility", ManageFacility.class)) - .addService(applicationContext.getBean("manageFilter", ManageFilter.class)) - .addService(applicationContext.getBean("manageFrame", ManageFrame.class)) - .addService(applicationContext.getBean("manageGroup", ManageGroup.class)) - .addService(applicationContext.getBean("manageHost", ManageHost.class)) - .addService(applicationContext.getBean("manageJob", ManageJob.class)) - .addService(applicationContext.getBean("manageLayer", ManageLayer.class)) - .addService(applicationContext.getBean("manageLimit", ManageLimit.class)) - .addService(applicationContext.getBean("manageMatcher", ManageMatcher.class)) - .addService(applicationContext.getBean("manageOwner", ManageOwner.class)) - .addService(applicationContext.getBean("manageProc", ManageProc.class)) - .addService( - applicationContext.getBean("manageRenderPartition", ManageRenderPartition.class)) - .addService(applicationContext.getBean("manageService", ManageService.class)) - .addService( - applicationContext.getBean("manageServiceOverride", ManageServiceOverride.class)) - .addService(applicationContext.getBean("manageShow", ManageShow.class)) - .addService(applicationContext.getBean("manageSubscription", ManageSubscription.class)) - .addService(applicationContext.getBean("manageTask", ManageTask.class)) - .maxInboundMessageSize(maxMessageBytes).intercept(new CueServerInterceptor()).build(); - server.start(); - logger.info("gRPC server started on " + this.name + " at port " + this.port + " !"); - } + public void start() throws IOException { + server = ServerBuilder.forPort(this.port) + .addService(applicationContext.getBean("rqdReportStatic", RqdReportStatic.class)) + .addService(applicationContext.getBean("cueStaticServant", CueStatic.class)) + .addService(applicationContext.getBean("manageAction", ManageAction.class)) + .addService(applicationContext.getBean("manageAllocation", ManageAllocation.class)) + .addService(applicationContext.getBean("manageComment", ManageComment.class)) + .addService(applicationContext.getBean("manageDeed", ManageDeed.class)) + .addService(applicationContext.getBean("manageDepartment", ManageDepartment.class)) + .addService(applicationContext.getBean("manageDepend", ManageDepend.class)) + .addService(applicationContext.getBean("manageFacility", ManageFacility.class)) + .addService(applicationContext.getBean("manageFilter", ManageFilter.class)) + 
.addService(applicationContext.getBean("manageFrame", ManageFrame.class)) + .addService(applicationContext.getBean("manageGroup", ManageGroup.class)) + .addService(applicationContext.getBean("manageHost", ManageHost.class)) + .addService(applicationContext.getBean("manageJob", ManageJob.class)) + .addService(applicationContext.getBean("manageLayer", ManageLayer.class)) + .addService(applicationContext.getBean("manageLimit", ManageLimit.class)) + .addService(applicationContext.getBean("manageMatcher", ManageMatcher.class)) + .addService(applicationContext.getBean("manageOwner", ManageOwner.class)) + .addService(applicationContext.getBean("manageProc", ManageProc.class)) + .addService(applicationContext.getBean("manageRenderPartition", + ManageRenderPartition.class)) + .addService(applicationContext.getBean("manageService", ManageService.class)) + .addService(applicationContext.getBean("manageServiceOverride", + ManageServiceOverride.class)) + .addService(applicationContext.getBean("manageShow", ManageShow.class)) + .addService( + applicationContext.getBean("manageSubscription", ManageSubscription.class)) + .addService(applicationContext.getBean("manageTask", ManageTask.class)) + .maxInboundMessageSize(maxMessageBytes).intercept(new CueServerInterceptor()) + .build(); + server.start(); + logger.info("gRPC server started on " + this.name + " at port " + this.port + " !"); + } - @Override - public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { - this.applicationContext = applicationContext; - } + @Override + public void setApplicationContext(ApplicationContext applicationContext) throws BeansException { + this.applicationContext = applicationContext; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java index b04fb5fa5..cd3586828 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ActionEntity.java @@ -21,75 +21,75 @@ public class ActionEntity extends Entity implements ActionInterface { - public String filterId; - public String showId; - - public ActionType type; - public ActionValueType valueType; - public String stringValue; - public long intValue; - public boolean booleanValue; - public String groupValue; - public float floatValue; - - public ActionEntity() { - this.name = null; - } - - public static ActionEntity build(Action data) { - ActionEntity entity = new ActionEntity(); - if (data.getGroupValue() != null) { - entity.groupValue = data.getGroupValue(); + public String filterId; + public String showId; + + public ActionType type; + public ActionValueType valueType; + public String stringValue; + public long intValue; + public boolean booleanValue; + public String groupValue; + public float floatValue; + + public ActionEntity() { + this.name = null; } - entity.stringValue = data.getStringValue(); - entity.booleanValue = data.getBooleanValue(); - entity.intValue = data.getIntegerValue(); - entity.floatValue = data.getFloatValue(); - entity.name = ""; - entity.type = data.getType(); - entity.valueType = data.getValueType(); - return entity; - } - - public static ActionEntity build(FilterInterface filter, Action data) { - ActionEntity entity = build(data); - entity.filterId = filter.getFilterId(); - entity.showId = filter.getShowId(); - return entity; - } - - public static ActionEntity build(FilterInterface filter, Action data, String id) { - ActionEntity action = build(filter, data); - 
action.id = id; - if (action.isNew()) { - throw new SpcueRuntimeException("the action has not been created yet"); + + public static ActionEntity build(Action data) { + ActionEntity entity = new ActionEntity(); + if (data.getGroupValue() != null) { + entity.groupValue = data.getGroupValue(); + } + entity.stringValue = data.getStringValue(); + entity.booleanValue = data.getBooleanValue(); + entity.intValue = data.getIntegerValue(); + entity.floatValue = data.getFloatValue(); + entity.name = ""; + entity.type = data.getType(); + entity.valueType = data.getValueType(); + return entity; } - return action; - } + public static ActionEntity build(FilterInterface filter, Action data) { + ActionEntity entity = build(data); + entity.filterId = filter.getFilterId(); + entity.showId = filter.getShowId(); + return entity; + } - public String getId() { - return id; - } + public static ActionEntity build(FilterInterface filter, Action data, String id) { + ActionEntity action = build(filter, data); + action.id = id; + if (action.isNew()) { + throw new SpcueRuntimeException("the action has not been created yet"); + } + return action; - public String getName() { - return null; - } + } - public String getActionId() { - return id; - } + public String getId() { + return id; + } - public String getFilterId() { - if (filterId == null) { - throw new SpcueRuntimeException( - "Trying to get a filterId from a ActityEntity created without a filter"); + public String getName() { + return null; } - return filterId; - } - public String getShowId() { - return showId; - } + public String getActionId() { + return id; + } + + public String getFilterId() { + if (filterId == null) { + throw new SpcueRuntimeException( + "Trying to get a filterId from a ActityEntity created without a filter"); + } + return filterId; + } + + public String getShowId() { + return showId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java b/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java index 4f28e2c54..4e1a73443 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ActionInterface.java @@ -17,6 +17,6 @@ public interface ActionInterface extends FilterInterface { - public String getActionId(); + public String getActionId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java b/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java index 27b492ac1..0f8f4a832 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/AllocationEntity.java @@ -17,16 +17,16 @@ public class AllocationEntity extends Entity implements AllocationInterface { - public String tag; - public String facilityId; + public String tag; + public String facilityId; - public String getAllocationId() { - return id; - } + public String getAllocationId() { + return id; + } - @Override - public String getFacilityId() { - return facilityId; - } + @Override + public String getFacilityId() { + return facilityId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java b/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java index 9e2669d15..fe040d6bb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/AllocationInterface.java @@ -17,5 +17,5 @@ public interface AllocationInterface extends EntityInterface, FacilityInterface { - public String getAllocationId(); + 
public String getAllocationId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java index d3eb94b07..426da9f41 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableDependency.java @@ -19,100 +19,100 @@ public class BuildableDependency { - public DependType type; - public boolean anyFrame = false; - public boolean launchDepend = false; - - public String dependErJobName; - public String dependErLayerName; - public String dependErFrameName; - - public String dependOnJobName; - public String dependOnLayerName; - public String dependOnFrameName; - - public boolean isAnyFrame() { - return anyFrame; - } - - public void setAnyFrame(boolean anyFrame) { - this.anyFrame = anyFrame; - } - - public String getDependErFrameName() { - return dependErFrameName; - } - - public void setDependErFrameName(String dependErFrameName) { - this.dependErFrameName = dependErFrameName; - } - - public String getDependErJobName() { - return dependErJobName; - } - - public void setDependErJobName(String dependErJobName) { - this.dependErJobName = dependErJobName; - } - - public String getDependErLayerName() { - return dependErLayerName; - } - - public void setDependErLayerName(String dependErLayerName) { - this.dependErLayerName = dependErLayerName; - } - - public String getDependOnFrameName() { - return dependOnFrameName; - } - - public void setDependOnFrameName(String dependOnFrameName) { - this.dependOnFrameName = dependOnFrameName; - } - - public String getDependOnJobName() { - return dependOnJobName; - } - - public void setDependOnJobName(String dependOnJobName) { - this.dependOnJobName = dependOnJobName; - } - - public String getDependOnLayerName() { - return dependOnLayerName; - } - - public void setDependOnLayerName(String dependOnLayerName) { - this.dependOnLayerName = dependOnLayerName; - } - - public DependType getType() { - return type; - } - - public void setType(DependType type) { - this.type = type; - } - - public String toString() { - StringBuilder sb = new StringBuilder(1024); - sb.append("Depend Type: " + type.toString() + "\n"); - sb.append("Depend on job: " + dependErJobName + "\n"); - sb.append("Depend on layer: " + dependOnLayerName + "\n"); - sb.append("Depend on frame: " + dependOnFrameName + "\n"); - sb.append("Depend er job: " + dependOnJobName + "\n"); - sb.append("Depend er layer: " + dependErLayerName + "\n"); - sb.append("Depend er frame: " + dependErFrameName + "\n"); - return sb.toString(); - } - - public boolean isLaunchDepend() { - return launchDepend; - } - - public void setLaunchDepend(boolean launchDepend) { - this.launchDepend = launchDepend; - } + public DependType type; + public boolean anyFrame = false; + public boolean launchDepend = false; + + public String dependErJobName; + public String dependErLayerName; + public String dependErFrameName; + + public String dependOnJobName; + public String dependOnLayerName; + public String dependOnFrameName; + + public boolean isAnyFrame() { + return anyFrame; + } + + public void setAnyFrame(boolean anyFrame) { + this.anyFrame = anyFrame; + } + + public String getDependErFrameName() { + return dependErFrameName; + } + + public void setDependErFrameName(String dependErFrameName) { + this.dependErFrameName = dependErFrameName; + } + + public String getDependErJobName() { + return dependErJobName; + } + + public void setDependErJobName(String dependErJobName) { + 
this.dependErJobName = dependErJobName; + } + + public String getDependErLayerName() { + return dependErLayerName; + } + + public void setDependErLayerName(String dependErLayerName) { + this.dependErLayerName = dependErLayerName; + } + + public String getDependOnFrameName() { + return dependOnFrameName; + } + + public void setDependOnFrameName(String dependOnFrameName) { + this.dependOnFrameName = dependOnFrameName; + } + + public String getDependOnJobName() { + return dependOnJobName; + } + + public void setDependOnJobName(String dependOnJobName) { + this.dependOnJobName = dependOnJobName; + } + + public String getDependOnLayerName() { + return dependOnLayerName; + } + + public void setDependOnLayerName(String dependOnLayerName) { + this.dependOnLayerName = dependOnLayerName; + } + + public DependType getType() { + return type; + } + + public void setType(DependType type) { + this.type = type; + } + + public String toString() { + StringBuilder sb = new StringBuilder(1024); + sb.append("Depend Type: " + type.toString() + "\n"); + sb.append("Depend on job: " + dependErJobName + "\n"); + sb.append("Depend on layer: " + dependOnLayerName + "\n"); + sb.append("Depend on frame: " + dependOnFrameName + "\n"); + sb.append("Depend er job: " + dependOnJobName + "\n"); + sb.append("Depend er layer: " + dependErLayerName + "\n"); + sb.append("Depend er frame: " + dependErFrameName + "\n"); + return sb.toString(); + } + + public boolean isLaunchDepend() { + return launchDepend; + } + + public void setLaunchDepend(boolean launchDepend) { + this.launchDepend = launchDepend; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java index e71ec29c9..dd3d47e3e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java @@ -27,76 +27,76 @@ */ public class BuildableJob { - /** - * Struct for the job detail, used for adding job to DB. - */ - public JobDetail detail; - - /** - * Maximum CPU cores and GPU units overrides. - */ - public Integer maxCoresOverride = null; - public Integer maxGpusOverride = null; - - /** - * List of layers - */ - private List layers = new ArrayList(); - - private BuildableJob postJob = null; - - /** - * Stores the local core assignment if one was launched with the job. - */ - private LocalHostAssignment runLocalConf = null; - - /** - * Job specific environment variables - */ - public Map env = new HashMap(); - - public BuildableJob() {} - - public BuildableJob(JobDetail detail) { - this.detail = detail; - } - - /** - * Add a layer to the job - * - * @param layer - */ - public void addBuildableLayer(BuildableLayer layer) { - layers.add(layer); - } - - /** - * Add a key/value pair environment var to job - * - * @param key - * @param value - */ - public void addEnvironmentVariable(String key, String value) { - env.put(key, value); - } - - public List getBuildableLayers() { - return layers; - } - - public void setPostJob(BuildableJob job) { - this.postJob = job; - } - - public BuildableJob getPostJob() { - return this.postJob; - } - - public void setRunLocalConf(LocalHostAssignment runLocalConf) { - this.runLocalConf = runLocalConf; - } - - public LocalHostAssignment getRunLocalConf() { - return this.runLocalConf; - } + /** + * Struct for the job detail, used for adding job to DB. + */ + public JobDetail detail; + + /** + * Maximum CPU cores and GPU units overrides. 
+ */ + public Integer maxCoresOverride = null; + public Integer maxGpusOverride = null; + + /** + * List of layers + */ + private List layers = new ArrayList(); + + private BuildableJob postJob = null; + + /** + * Stores the local core assignment if one was launched with the job. + */ + private LocalHostAssignment runLocalConf = null; + + /** + * Job specific environment variables + */ + public Map env = new HashMap(); + + public BuildableJob() {} + + public BuildableJob(JobDetail detail) { + this.detail = detail; + } + + /** + * Add a layer to the job + * + * @param layer + */ + public void addBuildableLayer(BuildableLayer layer) { + layers.add(layer); + } + + /** + * Add a key/value pair environment var to job + * + * @param key + * @param value + */ + public void addEnvironmentVariable(String key, String value) { + env.put(key, value); + } + + public List getBuildableLayers() { + return layers; + } + + public void setPostJob(BuildableJob job) { + this.postJob = job; + } + + public BuildableJob getPostJob() { + return this.postJob; + } + + public void setRunLocalConf(LocalHostAssignment runLocalConf) { + this.runLocalConf = runLocalConf; + } + + public LocalHostAssignment getRunLocalConf() { + return this.runLocalConf; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java index 0fbb0a2a4..6c6c4e372 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableLayer.java @@ -24,24 +24,24 @@ * @category Job Launching */ public class BuildableLayer { - /** - * If the user manually set memory, this is updated to true. - */ - public boolean isMemoryOverride = false; - - /** - * Stores the layer detail. LayerDetail is needed to actually insert the layer into the DB. - */ - public LayerDetail layerDetail = new LayerDetail(); - - /** - * Map for storing environment vars - */ - public Map env = new HashMap(); - - public BuildableLayer() {} - - public BuildableLayer(LayerDetail detail) { - this.layerDetail = detail; - } + /** + * If the user manually set memory, this is updated to true. + */ + public boolean isMemoryOverride = false; + + /** + * Stores the layer detail. LayerDetail is needed to actually insert the layer into the DB. 
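// Illustrative usage sketch (not part of this patch): how the BuildableJob and BuildableLayer
// classes reformatted above are typically assembled. The JobDetail/LayerDetail no-arg
// constructors and the inherited public "name" field are assumptions based on the surrounding
// code, and the name/env values are hypothetical.
import com.imageworks.spcue.BuildableJob;
import com.imageworks.spcue.BuildableLayer;
import com.imageworks.spcue.JobDetail;
import com.imageworks.spcue.LayerDetail;

public class BuildableJobSketch {
    public static BuildableJob sketch() {
        JobDetail detail = new JobDetail();
        detail.name = "example_show-example_shot-user_example_job"; // hypothetical job name
        BuildableJob job = new BuildableJob(detail);

        LayerDetail layerDetail = new LayerDetail();
        layerDetail.name = "render";                        // hypothetical layer name
        BuildableLayer layer = new BuildableLayer(layerDetail);
        layer.isMemoryOverride = true;                      // user supplied an explicit memory value

        job.addBuildableLayer(layer);                       // later retrieved via getBuildableLayers()
        job.addEnvironmentVariable("SHOT", "example_010");  // job-specific environment variable
        return job;
    }
}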
+ */ + public LayerDetail layerDetail = new LayerDetail(); + + /** + * Map for storing environment vars + */ + public Map env = new HashMap(); + + public BuildableLayer() {} + + public BuildableLayer(LayerDetail detail) { + this.layerDetail = detail; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java b/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java index b8ac3e3ec..3453bc5d5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CommentDetail.java @@ -18,8 +18,8 @@ import java.sql.Timestamp; public class CommentDetail extends Entity { - public Timestamp timestamp; - public String subject; - public String message; - public String user; + public Timestamp timestamp; + public String subject; + public String message; + public String user; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java b/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java index a9ddfe41d..64beb46b9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CueGrpcException.java @@ -18,14 +18,14 @@ public class CueGrpcException extends NestedRuntimeException { - private static final long serialVersionUID = -3215497096936812369L; + private static final long serialVersionUID = -3215497096936812369L; - public CueGrpcException(String message) { - super(message); - } + public CueGrpcException(String message) { + super(message); + } - public CueGrpcException(String message, Throwable cause) { - super(message, cause); - } + public CueGrpcException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java b/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java index c50c308f2..f20dcdfb5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java @@ -27,34 +27,33 @@ @SpringBootApplication public class CuebotApplication extends SpringApplication { - private static String[] checkArgs(String[] args) { - Optional deprecatedFlag = - Arrays.stream(args).filter(arg -> arg.startsWith("--log.frame-log-root=")).findFirst(); - if (deprecatedFlag.isPresent()) { - // Log a deprecation warning. - Logger warning_logger = LogManager.getLogger(CuebotApplication.class); - warning_logger.warn("`--log.frame-log-root` is deprecated and will be removed in an " - + "upcoming release. It has been replaced with `--log.frame-log-root.default_os`. " - + "See opencue.properties for details on OpenCue's new OS-dependent root directories."); - // If new flags are not present, swap in the value provided using the new flag. - // If the new flags are already present, don't do anything. - Optional newFlags = - Arrays.stream(args).filter(arg -> arg.startsWith("--log.frame-log-root.")).findAny(); - if (!newFlags.isPresent()) { - String fixedFlag = "--log.frame-log-root.default_os=" - + StringUtils.substringAfter(deprecatedFlag.get(), "="); - args = Stream - .concat(Arrays.stream(args).filter(arg -> !arg.startsWith("--log.frame-log-root=")), - Stream.of(fixedFlag)) - .toArray(String[]::new); - } + private static String[] checkArgs(String[] args) { + Optional deprecatedFlag = Arrays.stream(args) + .filter(arg -> arg.startsWith("--log.frame-log-root=")).findFirst(); + if (deprecatedFlag.isPresent()) { + // Log a deprecation warning. 
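// For example (hypothetical value): an invocation passing "--log.frame-log-root=/var/log/cue"
// with no "--log.frame-log-root.*" flag is rewritten by the code below so that Spring instead
// receives "--log.frame-log-root.default_os=/var/log/cue"; if any new-style flag is already
// present, the argument list is left untouched.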
+ Logger warning_logger = LogManager.getLogger(CuebotApplication.class); + warning_logger.warn("`--log.frame-log-root` is deprecated and will be removed in an " + + "upcoming release. It has been replaced with `--log.frame-log-root.default_os`. " + + "See opencue.properties for details on OpenCue's new OS-dependent root directories."); + // If new flags are not present, swap in the value provided using the new flag. + // If the new flags are already present, don't do anything. + Optional newFlags = Arrays.stream(args) + .filter(arg -> arg.startsWith("--log.frame-log-root.")).findAny(); + if (!newFlags.isPresent()) { + String fixedFlag = "--log.frame-log-root.default_os=" + + StringUtils.substringAfter(deprecatedFlag.get(), "="); + args = Stream.concat( + Arrays.stream(args).filter(arg -> !arg.startsWith("--log.frame-log-root=")), + Stream.of(fixedFlag)).toArray(String[]::new); + } + } + return args; } - return args; - } - public static void main(String[] args) { - // Cuebot startup - String[] filteredArgs = checkArgs(args); - SpringApplication.run(CuebotApplication.class, filteredArgs); - } + public static void main(String[] args) { + // Cuebot startup + String[] filteredArgs = checkArgs(args); + SpringApplication.run(CuebotApplication.class, filteredArgs); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java b/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java index a0d79622a..52081232e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java @@ -17,11 +17,11 @@ public class DeedEntity extends Entity { - public String owner; - public String host; - public String show; + public String owner; + public String host; + public String show; - public String getName() { - return String.format("%s.%s", owner, host); - } + public String getName() { + return String.format("%s.%s", owner, host); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java b/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java index ed077e3bd..72c0da73b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DepartmentEntity.java @@ -17,8 +17,8 @@ public class DepartmentEntity extends Entity implements DepartmentInterface { - public String getDepartmentId() { - return id; - } + public String getDepartmentId() { + return id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java b/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java index 03e5ffab7..17951caab 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DepartmentInterface.java @@ -17,5 +17,5 @@ public interface DepartmentInterface extends EntityInterface { - public String getDepartmentId(); + public String getDepartmentId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java b/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java index 2f750c14e..077a5fff6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DependencyManagerException.java @@ -20,14 +20,14 @@ @SuppressWarnings("serial") public class DependencyManagerException extends NestedRuntimeException { - public DependencyManagerException(String arg0) { - super(arg0); - // TODO Auto-generated constructor stub - } + public 
DependencyManagerException(String arg0) { + super(arg0); + // TODO Auto-generated constructor stub + } - public DependencyManagerException(String arg0, Throwable arg1) { - super(arg0, arg1); - // TODO Auto-generated constructor stub - } + public DependencyManagerException(String arg0, Throwable arg1) { + super(arg0, arg1); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java index 9e5c856b6..88a2bc10c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java @@ -22,50 +22,50 @@ public class DispatchFrame extends FrameEntity implements FrameInterface { - public int retries; - public FrameState state; - - public String show; - public String shot; - public String owner; - public Optional uid; - public String logDir; - public String command; - public String range; - public int chunkSize; - - public String layerName; - public String jobName; - - public int minCores; - public int maxCores; - public boolean threadable; - public int minGpus; - public int maxGpus; - public long minGpuMemory; - - // A comma separated list of services - public String services; - - // The Operational System this frame is expected to run in - public String os; - - // Memory requirement for this frame in bytes - private long minMemory; - - // Soft limit to be enforced for this frame in bytes - public long softMemoryLimit; - - // Hard limit to be enforced for this frame in bytes - public long hardMemoryLimit; - - public void setMinMemory(long minMemory) { - this.minMemory = minMemory; - this.softMemoryLimit = (long) (((double) minMemory) * Dispatcher.SOFT_MEMORY_MULTIPLIER); - this.hardMemoryLimit = (long) (((double) minMemory) * Dispatcher.HARD_MEMORY_MULTIPLIER); - } - - public long getMinMemory() { - return this.minMemory; - } + public int retries; + public FrameState state; + + public String show; + public String shot; + public String owner; + public Optional uid; + public String logDir; + public String command; + public String range; + public int chunkSize; + + public String layerName; + public String jobName; + + public int minCores; + public int maxCores; + public boolean threadable; + public int minGpus; + public int maxGpus; + public long minGpuMemory; + + // A comma separated list of services + public String services; + + // The Operational System this frame is expected to run in + public String os; + + // Memory requirement for this frame in bytes + private long minMemory; + + // Soft limit to be enforced for this frame in bytes + public long softMemoryLimit; + + // Hard limit to be enforced for this frame in bytes + public long hardMemoryLimit; + + public void setMinMemory(long minMemory) { + this.minMemory = minMemory; + this.softMemoryLimit = (long) (((double) minMemory) * Dispatcher.SOFT_MEMORY_MULTIPLIER); + this.hardMemoryLimit = (long) (((double) minMemory) * Dispatcher.HARD_MEMORY_MULTIPLIER); + } + + public long getMinMemory() { + return this.minMemory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java index 37060a244..8ad4bc418 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java @@ -24,166 +24,166 @@ import org.apache.logging.log4j.LogManager; public class DispatchHost extends Entity - implements HostInterface, 
FacilityInterface, ResourceContainer { - - private static final Logger logger = LogManager.getLogger(DispatchHost.class); - - public String facilityId; - public String allocationId; - public LockState lockState; - public HardwareState hardwareState; - - public int cores; - public int idleCores; - - public int gpus; - public int idleGpus; - - // Basically an 0 = auto, 1 = all. - public int threadMode; - - public long memory; - public long idleMemory; - public long gpuMemory; - public long idleGpuMemory; - public String tags; - private String os; - - public boolean isNimby; - public boolean isLocalDispatch = false; - - /** - * Number of cores that will be added to the first proc booked to this host. - */ - public int strandedCores = 0; - public int strandedGpus = 0; - - // To reserve resources for future gpu job - long idleMemoryOrig = 0; - int idleCoresOrig = 0; - long idleGpuMemoryOrig = 0; - int idleGpusOrig = 0; - - public String getHostId() { - return id; - } - - public String getAllocationId() { - return allocationId; - } - - public String getFacilityId() { - return facilityId; - } - - public String[] getOs() { - return this.os.split(","); - } - - public void setOs(String os) { - this.os = os; - } - - public boolean canHandleNegativeCoresRequest(int requestedCores) { - // Request is positive, no need to test further. - if (requestedCores > 0) { - logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); - return true; + implements HostInterface, FacilityInterface, ResourceContainer { + + private static final Logger logger = LogManager.getLogger(DispatchHost.class); + + public String facilityId; + public String allocationId; + public LockState lockState; + public HardwareState hardwareState; + + public int cores; + public int idleCores; + + public int gpus; + public int idleGpus; + + // Basically an 0 = auto, 1 = all. + public int threadMode; + + public long memory; + public long idleMemory; + public long gpuMemory; + public long idleGpuMemory; + public String tags; + private String os; + + public boolean isNimby; + public boolean isLocalDispatch = false; + + /** + * Number of cores that will be added to the first proc booked to this host. + */ + public int strandedCores = 0; + public int strandedGpus = 0; + + // To reserve resources for future gpu job + long idleMemoryOrig = 0; + int idleCoresOrig = 0; + long idleGpuMemoryOrig = 0; + int idleGpusOrig = 0; + + public String getHostId() { + return id; + } + + public String getAllocationId() { + return allocationId; } - // All cores are available, validate the request. - if (cores == idleCores) { - logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); - return true; + + public String getFacilityId() { + return facilityId; } - // Some or all cores are busy, avoid booking again. - logger.debug(getName() + " cannot handle the job with " + requestedCores + " cores."); - return false; - } - - public int handleNegativeCoresRequirement(int requestedCores) { - // If we request a <=0 amount of cores, return positive core count. - // Request -2 on a 24 core machine will return 22. - - if (requestedCores > 0) { - // Do not process positive core requests. - logger.debug("Requested " + requestedCores + " cores."); - return requestedCores; + + public String[] getOs() { + return this.os.split(","); } - if (requestedCores <= 0 && idleCores < cores) { - // If request is negative but cores are already used, return 0. - // We don't want to overbook the host. 
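// Worked example of the negative-core convention implemented by handleNegativeCoresRequirement
// (the method being reformatted in this hunk). This is an illustrative standalone sketch, not
// part of the patch; it mirrors the logic shown here.
public final class NegativeCoresSketch {
    // A request of -2 on a fully idle 24-core host books 22 cores, while any negative request
    // on a partially booked host books nothing.
    static int resolve(int requestedCores, int cores, int idleCores) {
        if (requestedCores > 0) {
            return requestedCores;          // positive requests pass through unchanged
        }
        if (idleCores < cores) {
            return 0;                       // host already busy: do not overbook
        }
        return idleCores + requestedCores;  // e.g. 24 + (-2) = 22
    }

    public static void main(String[] args) {
        System.out.println(resolve(-2, 24, 24)); // 22
        System.out.println(resolve(-2, 24, 12)); // 0
        System.out.println(resolve(8, 24, 24));  // 8
    }
}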
- logger.debug("Requested " + requestedCores - + " cores, but the host is busy and cannot book more jobs."); - return 0; + + public void setOs(String os) { + this.os = os; } - // Book all cores minus the request - int totalCores = idleCores + requestedCores; - logger.debug("Requested " + requestedCores + " cores <= 0, " + idleCores - + " cores are free, booking " + totalCores + " cores"); - return totalCores; - } - - @Override - public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, - long minGpuMemory) { - minCores = handleNegativeCoresRequirement(minCores); - if (idleCores < minCores) { - return false; + + public boolean canHandleNegativeCoresRequest(int requestedCores) { + // Request is positive, no need to test further. + if (requestedCores > 0) { + logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); + return true; + } + // All cores are available, validate the request. + if (cores == idleCores) { + logger.debug(getName() + " can handle the job with " + requestedCores + " cores."); + return true; + } + // Some or all cores are busy, avoid booking again. + logger.debug(getName() + " cannot handle the job with " + requestedCores + " cores."); + return false; } - if (minCores <= 0) { - return false; - } else if (idleMemory < minMemory) { - return false; - } else if (idleGpus < minGpus) { - return false; - } else if (idleGpuMemory < minGpuMemory) { - return false; + + public int handleNegativeCoresRequirement(int requestedCores) { + // If we request a <=0 amount of cores, return positive core count. + // Request -2 on a 24 core machine will return 22. + + if (requestedCores > 0) { + // Do not process positive core requests. + logger.debug("Requested " + requestedCores + " cores."); + return requestedCores; + } + if (requestedCores <= 0 && idleCores < cores) { + // If request is negative but cores are already used, return 0. + // We don't want to overbook the host. + logger.debug("Requested " + requestedCores + + " cores, but the host is busy and cannot book more jobs."); + return 0; + } + // Book all cores minus the request + int totalCores = idleCores + requestedCores; + logger.debug("Requested " + requestedCores + " cores <= 0, " + idleCores + + " cores are free, booking " + totalCores + " cores"); + return totalCores; } - return true; - } - - @Override - public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { - idleCores = idleCores - coreUnits; - idleMemory = idleMemory - memory; - idleGpus = idleGpus - gpuUnits; - idleGpuMemory = idleGpuMemory - gpuMemory; - } - - /** - * If host has idle gpu, remove enough resources to book a gpu frame later. 
- * - */ - public void removeGpu() { - if (idleGpuMemory > 0 && idleGpuMemoryOrig == 0) { - idleMemoryOrig = idleMemory; - idleCoresOrig = idleCores; - idleGpuMemoryOrig = idleGpuMemory; - idleGpusOrig = idleGpus; - - idleMemory = idleMemory - Math.min(CueUtil.GB4, idleMemory); - idleCores = idleCores - Math.min(100, idleCores); - idleGpuMemory = idleGpuMemory - Math.min(CueUtil.GB4, idleGpuMemory); - idleGpus = idleGpus - Math.min(1, idleGpus); + @Override + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, + long minGpuMemory) { + minCores = handleNegativeCoresRequirement(minCores); + if (idleCores < minCores) { + return false; + } + if (minCores <= 0) { + return false; + } else if (idleMemory < minMemory) { + return false; + } else if (idleGpus < minGpus) { + return false; + } else if (idleGpuMemory < minGpuMemory) { + return false; + } + + return true; } - } - - /** - * If host had idle gpu removed, restore the host to the origional state. - * - */ - public void restoreGpu() { - if (idleGpuMemoryOrig > 0) { - idleMemory = idleMemoryOrig; - idleCores = idleCoresOrig; - idleGpuMemory = idleGpuMemoryOrig; - idleGpus = idleGpusOrig; - - idleMemoryOrig = 0; - idleCoresOrig = 0; - idleGpuMemoryOrig = 0; - idleGpusOrig = 0; + + @Override + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { + idleCores = idleCores - coreUnits; + idleMemory = idleMemory - memory; + idleGpus = idleGpus - gpuUnits; + idleGpuMemory = idleGpuMemory - gpuMemory; + } + + /** + * If host has idle gpu, remove enough resources to book a gpu frame later. + * + */ + public void removeGpu() { + if (idleGpuMemory > 0 && idleGpuMemoryOrig == 0) { + idleMemoryOrig = idleMemory; + idleCoresOrig = idleCores; + idleGpuMemoryOrig = idleGpuMemory; + idleGpusOrig = idleGpus; + + idleMemory = idleMemory - Math.min(CueUtil.GB4, idleMemory); + idleCores = idleCores - Math.min(100, idleCores); + idleGpuMemory = idleGpuMemory - Math.min(CueUtil.GB4, idleGpuMemory); + idleGpus = idleGpus - Math.min(1, idleGpus); + } + } + + /** + * If host had idle gpu removed, restore the host to the origional state. 
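// Usage note (illustrative, not from the patch): removeGpu() shown above snapshots the idle
// counters and holds back up to CueUtil.GB4 of memory, 100 core units, CueUtil.GB4 of GPU
// memory and one GPU so a GPU frame can still be booked later; restoreGpu() that follows puts
// the snapshot back. The try/finally pairing below is an assumption, not taken from this patch.
import com.imageworks.spcue.DispatchHost;

final class GpuReservationSketch {
    static void bookCpuOnlyWork(DispatchHost host) {
        host.removeGpu();       // reserve room for a future GPU frame
        try {
            // ... book CPU-only frames against the reduced idle counters (elided) ...
        } finally {
            host.restoreGpu();  // release the reserved resources
        }
    }
}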
+ * + */ + public void restoreGpu() { + if (idleGpuMemoryOrig > 0) { + idleMemory = idleMemoryOrig; + idleCores = idleCoresOrig; + idleGpuMemory = idleGpuMemoryOrig; + idleGpus = idleGpusOrig; + + idleMemoryOrig = 0; + idleCoresOrig = 0; + idleGpuMemoryOrig = 0; + idleGpusOrig = 0; + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java index 347565b10..b0ded0a85 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchJob.java @@ -18,10 +18,10 @@ import com.imageworks.spcue.grpc.job.JobState; public class DispatchJob extends JobEntity implements JobInterface { - public int maxRetries; - public boolean paused; - public boolean autoEat; - public boolean autoBook; - public boolean autoUnbook; - public JobState state; + public int maxRetries; + public boolean paused; + public boolean autoEat; + public boolean autoBook; + public boolean autoUnbook; + public JobState state; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/Entity.java b/cuebot/src/main/java/com/imageworks/spcue/Entity.java index b216893f6..b5dac61c3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Entity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Entity.java @@ -17,51 +17,51 @@ public class Entity implements EntityInterface { - public String id = null; - public String name = "unknown"; + public String id = null; + public String name = "unknown"; - public Entity() {} + public Entity() {} - public Entity(String id) { - this.id = id; - } + public Entity(String id) { + this.id = id; + } - public Entity(String id, String name) { - this.id = id; - this.name = name; - } + public Entity(String id, String name) { + this.id = id; + this.name = name; + } - public String getId() { - return id; - } + public String getId() { + return id; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public boolean isNew() { - return id == null; - } + public boolean isNew() { + return id == null; + } - @Override - public String toString() { - return String.format("%s/%s", getName(), getId()); - } + @Override + public String toString() { + return String.format("%s/%s", getName(), getId()); + } - @Override - public int hashCode() { - if (id != null) { - return id.hashCode(); - } else { - return super.hashCode(); + @Override + public int hashCode() { + if (id != null) { + return id.hashCode(); + } else { + return super.hashCode(); + } } - } - @Override - public boolean equals(Object other) { - if (other == null) { - return false; + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + return this.toString().equals(other.toString()); } - return this.toString().equals(other.toString()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java b/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java index 6c1a243f8..cd3e71054 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityCreationError.java @@ -18,28 +18,28 @@ @SuppressWarnings("serial") public class EntityCreationError extends EntityException { - public EntityCreationError() { - // TODO Auto-generated constructor stub - } - - public EntityCreationError(String message, EntityInterface entity) { - super("failed to create entity of type: " + entity.getClass() + " with name: " - + entity.getName() + " ," + message, 
entity); - } - - public EntityCreationError(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public EntityCreationError(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public EntityCreationError(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityCreationError() { + // TODO Auto-generated constructor stub + } + + public EntityCreationError(String message, EntityInterface entity) { + super("failed to create entity of type: " + entity.getClass() + " with name: " + + entity.getName() + " ," + message, entity); + } + + public EntityCreationError(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public EntityCreationError(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public EntityCreationError(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityException.java b/cuebot/src/main/java/com/imageworks/spcue/EntityException.java index 463ec588c..e3118ab44 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityException.java @@ -18,31 +18,31 @@ @SuppressWarnings("serial") public class EntityException extends SpcueRuntimeException { - @SuppressWarnings("unused") - private EntityInterface entity; - - public EntityException() { - // TODO Auto-generated constructor stub - } - - public EntityException(String message, EntityInterface e) { - super(message); - entity = e; - } - - public EntityException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public EntityException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public EntityException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + @SuppressWarnings("unused") + private EntityInterface entity; + + public EntityException() { + // TODO Auto-generated constructor stub + } + + public EntityException(String message, EntityInterface e) { + super(message); + entity = e; + } + + public EntityException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public EntityException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public EntityException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java b/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java index a9fbd0765..c2faf7528 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityInterface.java @@ -17,7 +17,7 @@ public interface EntityInterface { - public String getName(); + public String getName(); - public String getId(); + public String getId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java b/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java index fb347f3d6..7e0043717 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityModificationError.java @@ -18,28 +18,28 @@ @SuppressWarnings("serial") public class EntityModificationError extends 
EntityException { - public EntityModificationError() { - // TODO Auto-generated constructor stub - } - - public EntityModificationError(String message, EntityInterface e) { - super(message, e); - // TODO Auto-generated constructor stub - } - - public EntityModificationError(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public EntityModificationError(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public EntityModificationError(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityModificationError() { + // TODO Auto-generated constructor stub + } + + public EntityModificationError(String message, EntityInterface e) { + super(message, e); + // TODO Auto-generated constructor stub + } + + public EntityModificationError(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public EntityModificationError(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public EntityModificationError(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java b/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java index f463904c4..99ff5ffac 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityNotFoundException.java @@ -18,14 +18,14 @@ public class EntityNotFoundException extends NestedRuntimeException { - private static final long serialVersionUID = -5612998213656259822L; + private static final long serialVersionUID = -5612998213656259822L; - public EntityNotFoundException(String message) { - super(message); - } + public EntityNotFoundException(String message) { + super(message); + } - public EntityNotFoundException(String message, Throwable cause) { - super(message, cause); - } + public EntityNotFoundException(String message, Throwable cause) { + super(message, cause); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java b/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java index 31379fbaf..4fa05a420 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityRemovalError.java @@ -18,28 +18,28 @@ @SuppressWarnings("serial") public class EntityRemovalError extends EntityException { - public EntityRemovalError() { - // TODO Auto-generated constructor stub - } - - public EntityRemovalError(String message, EntityInterface entity) { - super("failed to create entity of type: " + entity.getClass() + " with name: " - + entity.getName() + " ," + message, entity); - } - - public EntityRemovalError(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public EntityRemovalError(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public EntityRemovalError(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public EntityRemovalError() { + // TODO Auto-generated constructor stub + } + + public EntityRemovalError(String message, EntityInterface entity) { + super("failed to create entity of type: " + entity.getClass() + " with name: " + + entity.getName() + " ," + message, entity); + } + + public EntityRemovalError(String message, Throwable cause) { + 
super(message, cause); + // TODO Auto-generated constructor stub + } + + public EntityRemovalError(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public EntityRemovalError(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java b/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java index 9ae399952..ae0a29a0f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/EntityRetrievalException.java @@ -18,23 +18,23 @@ @SuppressWarnings("serial") public class EntityRetrievalException extends RuntimeException { - public EntityRetrievalException() { - // TODO Auto-generated constructor stub - } - - public EntityRetrievalException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public EntityRetrievalException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } - - public EntityRetrievalException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public EntityRetrievalException() { + // TODO Auto-generated constructor stub + } + + public EntityRetrievalException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public EntityRetrievalException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } + + public EntityRetrievalException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java b/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java index 87a499060..1d39e394a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java @@ -20,76 +20,76 @@ */ public class ExecutionSummary { - public double standardDeviation; - public long coreTime; - public long coreTimeSuccess; - public long coreTimeFail; - public long gpuTime; - public long gpuTimeSuccess; - public long gpuTimeFail; - public long highMemoryKb; - - public long getHighMemoryKb() { - return highMemoryKb; - } - - public void setHighMemoryKb(long highMemoryKb) { - this.highMemoryKb = highMemoryKb; - } - - public double getStandardDeviation() { - return standardDeviation; - } - - public void setStandardDeviation(double standardDeviation) { - this.standardDeviation = standardDeviation; - } - - public long getCoreTime() { - return coreTime; - } - - public void setCoreTime(long coreTime) { - this.coreTime = coreTime; - } - - public long getCoreTimeSuccess() { - return coreTimeSuccess; - } - - public void setCoreTimeSuccess(long coreTimeSuccess) { - this.coreTimeSuccess = coreTimeSuccess; - } - - public long getCoreTimeFail() { - return coreTimeFail; - } - - public void setCoreTimeFail(long coreTimeFail) { - this.coreTimeFail = coreTimeFail; - } - - public long getGpuTime() { - return gpuTime; - } - - public void setGpuTime(long gpuTime) { - this.gpuTime = gpuTime; - } - - public long getGpuTimeSuccess() { - return gpuTimeSuccess; - } - - public void setGpuTimeSuccess(long gpuTimeSuccess) { - this.gpuTimeSuccess = gpuTimeSuccess; - } - - public long getGpuTimeFail() { - return gpuTimeFail; - } - - public void setGpuTimeFail(long gpuTimeFail) { - this.gpuTimeFail = gpuTimeFail; - } + public double 
standardDeviation; + public long coreTime; + public long coreTimeSuccess; + public long coreTimeFail; + public long gpuTime; + public long gpuTimeSuccess; + public long gpuTimeFail; + public long highMemoryKb; + + public long getHighMemoryKb() { + return highMemoryKb; + } + + public void setHighMemoryKb(long highMemoryKb) { + this.highMemoryKb = highMemoryKb; + } + + public double getStandardDeviation() { + return standardDeviation; + } + + public void setStandardDeviation(double standardDeviation) { + this.standardDeviation = standardDeviation; + } + + public long getCoreTime() { + return coreTime; + } + + public void setCoreTime(long coreTime) { + this.coreTime = coreTime; + } + + public long getCoreTimeSuccess() { + return coreTimeSuccess; + } + + public void setCoreTimeSuccess(long coreTimeSuccess) { + this.coreTimeSuccess = coreTimeSuccess; + } + + public long getCoreTimeFail() { + return coreTimeFail; + } + + public void setCoreTimeFail(long coreTimeFail) { + this.coreTimeFail = coreTimeFail; + } + + public long getGpuTime() { + return gpuTime; + } + + public void setGpuTime(long gpuTime) { + this.gpuTime = gpuTime; + } + + public long getGpuTimeSuccess() { + return gpuTimeSuccess; + } + + public void setGpuTimeSuccess(long gpuTimeSuccess) { + this.gpuTimeSuccess = gpuTimeSuccess; + } + + public long getGpuTimeFail() { + return gpuTimeFail; + } + + public void setGpuTimeFail(long gpuTimeFail) { + this.gpuTimeFail = gpuTimeFail; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java b/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java index 14d1db864..0fa3003bc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FacilityEntity.java @@ -17,13 +17,13 @@ public class FacilityEntity extends Entity implements FacilityInterface { - public FacilityEntity() {} + public FacilityEntity() {} - public FacilityEntity(String id) { - this.id = id; - } + public FacilityEntity(String id) { + this.id = id; + } - public String getFacilityId() { - return id; - } + public String getFacilityId() { + return id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java index 26cd1abe0..ba995f383 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FacilityInterface.java @@ -17,6 +17,6 @@ public interface FacilityInterface extends EntityInterface { - public String getFacilityId(); + public String getFacilityId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java b/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java index 3c30f8b9c..28ba5848a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FilterEntity.java @@ -19,25 +19,25 @@ public class FilterEntity extends Entity implements FilterInterface { - public FilterType type; - public String showId; - public boolean enabled; - public float order; - - public String getId() { - return id; - } - - public String getName() { - return name; - } - - public String getFilterId() { - return id; - } - - public String getShowId() { - return showId; - } + public FilterType type; + public String showId; + public boolean enabled; + public float order; + + public String getId() { + return id; + } + + public String getName() { + return name; + } + + public String getFilterId() { + return id; + } + + public 
String getShowId() { + return showId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java index 0805749ab..3737c0dbb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FilterInterface.java @@ -17,6 +17,6 @@ public interface FilterInterface extends ShowInterface { - public String getFilterId(); + public String getFilterId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java b/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java index cb95b341f..7fc4bc48a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java @@ -21,17 +21,17 @@ public class FrameDetail extends FrameEntity implements FrameInterface { - public FrameState state; - public int number; - public int dependCount; - public int retryCount; - public int exitStatus; - public long maxRss; - public int dispatchOrder; - public String lastResource; + public FrameState state; + public int number; + public int dependCount; + public int retryCount; + public int exitStatus; + public long maxRss; + public int dispatchOrder; + public String lastResource; - public Timestamp dateStarted; - public Timestamp dateStopped; - public Timestamp dateUpdated; - public Timestamp dateLLU; + public Timestamp dateStarted; + public Timestamp dateStopped; + public Timestamp dateUpdated; + public Timestamp dateLLU; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java b/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java index eb9a3d080..f3b56dc9d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameEntity.java @@ -17,24 +17,24 @@ public class FrameEntity extends LayerEntity implements FrameInterface { - public String layerId; - public int version; + public String layerId; + public int version; - public FrameEntity() {} + public FrameEntity() {} - public FrameEntity(String id) { - this.id = id; - } + public FrameEntity(String id) { + this.id = id; + } - public String getFrameId() { - return id; - } + public String getFrameId() { + return id; + } - public String getLayerId() { - return layerId; - } + public String getLayerId() { + return layerId; + } - public int getVersion() { - return version; - } + public int getVersion() { + return version; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java index d60325a7c..a4abfbb30 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java @@ -16,13 +16,13 @@ public interface FrameInterface extends LayerInterface { - public String getFrameId(); + public String getFrameId(); - /** - * To change a frame state, you must have the same version of the frame that exists in the DB. If - * the version changes, any operation that changes the state will fail. - * - * @return the time stamp that represents the last time this frame was updated. - */ - public int getVersion(); + /** + * To change a frame state, you must have the same version of the frame that exists in the DB. + * If the version changes, any operation that changes the state will fail. + * + * @return the time stamp that represents the last time this frame was updated. 
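// Illustrative sketch of the optimistic-versioning contract described in the javadoc above
// (a hypothetical helper, not part of the patch): a state change should only proceed while the
// caller still holds the version currently stored in the database.
import com.imageworks.spcue.FrameInterface;

final class FrameVersionGuard {
    // Returns true when the caller's snapshot is still current and an update may be attempted.
    static boolean isCurrent(FrameInterface snapshot, int versionInDb) {
        return snapshot.getVersion() == versionInDb;
    }
}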
+ */ + public int getVersion(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java b/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java index fa9e33c17..9b3a2f845 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameStateTotals.java @@ -16,76 +16,76 @@ package com.imageworks.spcue; public class FrameStateTotals { - public int waiting = 0; - public int running = 0; - public int depend = 0; - public int dead = 0; - public int eaten = 0; - public int succeeded = 0; - public int total = 0; - public int checkpoint = 0; - - public int getWaiting() { - return waiting; - } - - public void setWaiting(int waiting) { - this.waiting = waiting; - } - - public int getRunning() { - return running; - } - - public void setRunning(int running) { - this.running = running; - } - - public int getDepend() { - return depend; - } - - public void setDepend(int depend) { - this.depend = depend; - } - - public int getDead() { - return dead; - } - - public void setDead(int dead) { - this.dead = dead; - } - - public int getEaten() { - return eaten; - } - - public void setEaten(int eaten) { - this.eaten = eaten; - } - - public int getSucceeded() { - return succeeded; - } - - public void setSucceeded(int succeeded) { - this.succeeded = succeeded; - } - - public int getTotal() { - return total; - } - - public void setTotal(int total) { - this.total = total; - } - - public int getCheckpoint() { - return checkpoint; - } - - public void setCheckpoint(int checkpoint) { - this.checkpoint = checkpoint; - } + public int waiting = 0; + public int running = 0; + public int depend = 0; + public int dead = 0; + public int eaten = 0; + public int succeeded = 0; + public int total = 0; + public int checkpoint = 0; + + public int getWaiting() { + return waiting; + } + + public void setWaiting(int waiting) { + this.waiting = waiting; + } + + public int getRunning() { + return running; + } + + public void setRunning(int running) { + this.running = running; + } + + public int getDepend() { + return depend; + } + + public void setDepend(int depend) { + this.depend = depend; + } + + public int getDead() { + return dead; + } + + public void setDead(int dead) { + this.dead = dead; + } + + public int getEaten() { + return eaten; + } + + public void setEaten(int eaten) { + this.eaten = eaten; + } + + public int getSucceeded() { + return succeeded; + } + + public void setSucceeded(int succeeded) { + this.succeeded = succeeded; + } + + public int getTotal() { + return total; + } + + public void setTotal(int total) { + this.total = total; + } + + public int getCheckpoint() { + return checkpoint; + } + + public void setCheckpoint(int checkpoint) { + this.checkpoint = checkpoint; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java b/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java index 1d4c468d2..522d56653 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java @@ -17,33 +17,33 @@ public class GroupDetail extends Entity implements GroupInterface, DepartmentInterface { - public int jobMinCores = -1; - public int jobMaxCores = -1; - public int jobMinGpus = -1; - public int jobMaxGpus = -1; - public int jobPriority = -1; - - public int minCores = -1; - public int maxCores = -1; - - public int minGpus = -1; - public int maxGpus = -1; - - public String parentId = null; - public String showId; - public String deptId; - - 
@Override - public String getShowId() { - return showId; - } - - public String getGroupId() { - return id; - } - - @Override - public String getDepartmentId() { - return deptId; - } + public int jobMinCores = -1; + public int jobMaxCores = -1; + public int jobMinGpus = -1; + public int jobMaxGpus = -1; + public int jobPriority = -1; + + public int minCores = -1; + public int maxCores = -1; + + public int minGpus = -1; + public int maxGpus = -1; + + public String parentId = null; + public String showId; + public String deptId; + + @Override + public String getShowId() { + return showId; + } + + public String getGroupId() { + return id; + } + + @Override + public String getDepartmentId() { + return deptId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java b/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java index 49d81e967..83468f932 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupEntity.java @@ -17,16 +17,16 @@ public class GroupEntity extends Entity implements GroupInterface { - public String showId; + public String showId; - @Override - public String getGroupId() { - return id; - } + @Override + public String getGroupId() { + return id; + } - @Override - public String getShowId() { - return showId; - } + @Override + public String getShowId() { + return showId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java b/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java index a00118941..df0597154 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupInterface.java @@ -17,6 +17,6 @@ public interface GroupInterface extends ShowInterface { - public String getGroupId(); + public String getGroupId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java b/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java index eafe9701f..9d41ee117 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HistoricalJobTransferException.java @@ -18,23 +18,23 @@ @SuppressWarnings("serial") public class HistoricalJobTransferException extends SpcueRuntimeException { - public HistoricalJobTransferException() { - // TODO Auto-generated constructor stub - } - - public HistoricalJobTransferException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public HistoricalJobTransferException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public HistoricalJobTransferException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public HistoricalJobTransferException() { + // TODO Auto-generated constructor stub + } + + public HistoricalJobTransferException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public HistoricalJobTransferException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public HistoricalJobTransferException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java b/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java index a7eb2b393..b8c654f8a 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostConfigurationErrorException.java @@ -18,24 +18,24 @@ @SuppressWarnings("serial") public class HostConfigurationErrorException extends SpcueRuntimeException { - public HostConfigurationErrorException() { - super(); - // TODO Auto-generated constructor stub - } - - public HostConfigurationErrorException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public HostConfigurationErrorException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public HostConfigurationErrorException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public HostConfigurationErrorException() { + super(); + // TODO Auto-generated constructor stub + } + + public HostConfigurationErrorException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public HostConfigurationErrorException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public HostConfigurationErrorException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java b/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java index 172d5593e..548e99cd2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java @@ -23,55 +23,55 @@ public class HostEntity extends Entity implements HostInterface { - public String facilityId; - public String allocId; - public HardwareState state; - public LockState lockState; - public boolean nimbyEnabled; + public String facilityId; + public String allocId; + public HardwareState state; + public LockState lockState; + public boolean nimbyEnabled; - public int procs; - public int cores; - public int idleCores; - public long memory; - public long idleMemory; - public int gpus; - public int idleGpus; - public long gpuMemory; - public long idleGpuMemory; + public int procs; + public int cores; + public int idleCores; + public long memory; + public long idleMemory; + public int gpus; + public int idleGpus; + public long gpuMemory; + public long idleGpuMemory; - public boolean unlockAtBoot; + public boolean unlockAtBoot; - public Date dateCreated; - public Date datePinged; - public Date dateBooted; + public Date dateCreated; + public Date datePinged; + public Date dateBooted; - public HostEntity() {} + public HostEntity() {} - public HostEntity(Host grpcHost) { - this.id = grpcHost.getId(); - this.allocId = grpcHost.getAllocName(); - this.state = grpcHost.getState(); - this.lockState = grpcHost.getLockState(); - this.nimbyEnabled = grpcHost.getNimbyEnabled(); - this.cores = (int) grpcHost.getCores(); - this.idleCores = (int) grpcHost.getIdleCores(); - this.memory = grpcHost.getMemory(); - this.idleMemory = grpcHost.getIdleMemory(); - this.gpus = (int) grpcHost.getGpus(); - this.idleGpus = (int) grpcHost.getIdleGpus(); - this.gpuMemory = grpcHost.getGpuMemory(); - this.idleGpuMemory = grpcHost.getIdleGpuMemory(); - } + public HostEntity(Host grpcHost) { + this.id = grpcHost.getId(); + this.allocId = grpcHost.getAllocName(); + this.state = grpcHost.getState(); + this.lockState = grpcHost.getLockState(); + this.nimbyEnabled = grpcHost.getNimbyEnabled(); + this.cores = (int) grpcHost.getCores(); + this.idleCores = (int) 
grpcHost.getIdleCores(); + this.memory = grpcHost.getMemory(); + this.idleMemory = grpcHost.getIdleMemory(); + this.gpus = (int) grpcHost.getGpus(); + this.idleGpus = (int) grpcHost.getIdleGpus(); + this.gpuMemory = grpcHost.getGpuMemory(); + this.idleGpuMemory = grpcHost.getIdleGpuMemory(); + } - public String getHostId() { - return id; - } + public String getHostId() { + return id; + } - public String getAllocationId() { - return allocId; - } + public String getAllocationId() { + return allocId; + } - public String getFacilityId() { - return facilityId; - } + public String getFacilityId() { + return facilityId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java b/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java index f3fff15e1..b38c121eb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostInterface.java @@ -16,5 +16,5 @@ package com.imageworks.spcue; public interface HostInterface extends AllocationInterface { - String getHostId(); + String getHostId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/Inherit.java b/cuebot/src/main/java/com/imageworks/spcue/Inherit.java index b368e2380..3df0702eb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Inherit.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Inherit.java @@ -20,5 +20,5 @@ * */ public enum Inherit { - Priority, MinCores, MaxCores, MinGpus, MaxGpus, All + Priority, MinCores, MaxCores, MinGpus, MaxGpus, All } diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java b/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java index b5a7a3e78..b62f3e16a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java @@ -20,41 +20,41 @@ import com.imageworks.spcue.grpc.job.JobState; public class JobDetail extends JobEntity implements JobInterface, DepartmentInterface { - public String groupId; - public String deptId; - public JobState state; - public String shot; - public String user; - public String email; - public Optional<Integer> uid; - public String logDir; - public boolean isPaused; - public boolean isAutoEat; - public int totalFrames; - public int totalLayers; - public int startTime; - public int stopTime; - public int maxRetries; - - public String os; - public String facilityName; - public String deptName; - public String showName; - - public int priority = 1; - public int minCoreUnits = 100; - public int maxCoreUnits = 200000; - public int minGpuUnits = 0; - public int maxGpuUnits = 1000; - public boolean isLocal = false; - public String localHostName; - public int localMaxCores; - public long localMaxMemory; - public int localThreadNumber; - public int localMaxGpus; - public long localMaxGpuMemory; - - public String getDepartmentId() { - return deptId; - } + public String groupId; + public String deptId; + public JobState state; + public String shot; + public String user; + public String email; + public Optional<Integer> uid; + public String logDir; + public boolean isPaused; + public boolean isAutoEat; + public int totalFrames; + public int totalLayers; + public int startTime; + public int stopTime; + public int maxRetries; + + public String os; + public String facilityName; + public String deptName; + public String showName; + + public int priority = 1; + public int minCoreUnits = 100; + public int maxCoreUnits = 200000; + public int minGpuUnits = 0; + public int maxGpuUnits = 1000; + public boolean isLocal = false; + public String
localHostName; + public int localMaxCores; + public long localMaxMemory; + public int localThreadNumber; + public int localMaxGpus; + public long localMaxGpuMemory; + + public String getDepartmentId() { + return deptId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java b/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java index d12c4b896..680ba129f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobDispatchException.java @@ -17,22 +17,22 @@ @SuppressWarnings("serial") public class JobDispatchException extends SpcueRuntimeException { - public JobDispatchException() { - // TODO Auto-generated constructor stub - } + public JobDispatchException() { + // TODO Auto-generated constructor stub + } - public JobDispatchException(String message) { - super(message); - } + public JobDispatchException(String message) { + super(message); + } - public JobDispatchException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } + public JobDispatchException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } - public JobDispatchException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public JobDispatchException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java b/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java index d9aec9e4b..1cf148e5f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobEntity.java @@ -17,24 +17,24 @@ public class JobEntity extends Entity implements JobInterface { - public String showId; - public String facilityId; + public String showId; + public String facilityId; - public JobEntity() {} + public JobEntity() {} - public JobEntity(String id) { - this.id = id; - } + public JobEntity(String id) { + this.id = id; + } - public String getJobId() { - return id; - } + public String getJobId() { + return id; + } - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } - public String getFacilityId() { - return facilityId; - } + public String getFacilityId() { + return facilityId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java b/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java index 631925492..40a6027ac 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobInterface.java @@ -17,5 +17,5 @@ public interface JobInterface extends ShowInterface, FacilityInterface { - public String getJobId(); + public String getJobId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java b/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java index a10bb18c4..c74e55ef6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobLaunchException.java @@ -18,23 +18,23 @@ @SuppressWarnings("serial") public class JobLaunchException extends SpcueRuntimeException { - public JobLaunchException() { - // TODO Auto-generated constructor stub - } - - public JobLaunchException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public JobLaunchException(String 
message) { - super(message); - // TODO Auto-generated constructor stub - } - - public JobLaunchException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public JobLaunchException() { + // TODO Auto-generated constructor stub + } + + public JobLaunchException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public JobLaunchException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public JobLaunchException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java b/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java index d173c63dc..572139039 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java @@ -23,143 +23,143 @@ import com.imageworks.spcue.grpc.job.LayerType; public class LayerDetail extends LayerEntity implements LayerInterface { - public String command; - public String range; - public LayerType type; - public int minimumCores; - public int maximumCores; - public int minimumGpus; - public int maximumGpus; - public boolean isThreadable; - public long minimumMemory; - public long minimumGpuMemory; - public int chunkSize; - public int timeout; - public int timeout_llu; - public int dispatchOrder; - public int totalFrameCount; - - public Set<String> tags = new LinkedHashSet<String>(); - public Set<String> services = new LinkedHashSet<String>(); - public Set<String> limits = new LinkedHashSet<String>(); - public Set<String> outputs = new LinkedHashSet<String>(); - - /* - * - */ - public List<String> getServices() { - return new ArrayList<String>(services); - } - - public String getCommand() { - return command; - } - - public void setCommand(String command) { - this.command = command; - } - - public String getRange() { - return range; - } - - public void setRange(String range) { - this.range = range; - } - - public LayerType getType() { - return type; - } - - public void setType(LayerType type) { - this.type = type; - } - - public int getMinimumCores() { - return minimumCores; - } - - public void setMinimumCores(int minimumCores) { - this.minimumCores = minimumCores; - } - - public boolean isThreadable() { - return isThreadable; - } - - public void setThreadable(boolean isThreadable) { - this.isThreadable = isThreadable; - } - - public int getTimeout() { - return timeout; - } - - public void setTimeout(int timeout) { - this.timeout = timeout; - } - - public int getTimeoutLLU() { - return timeout; - } - - public void setTimeoutLLU(int timeout_llu) { - this.timeout_llu = timeout_llu; - } - - public long getMinimumMemory() { - return minimumMemory; - } - - public void setMinimumMemory(long minimumMemory) { - this.minimumMemory = minimumMemory; - } - - public int getMinimumGpus() { - return minimumGpus; - } - - public void setMinimumGpus(int minimumGpus) { - this.minimumGpus = minimumGpus; - } - - public long getMinimumGpuMemory() { - return minimumGpuMemory; - } - - public void setMinimumGpuMemory(long minimumGpuMemory) { - this.minimumGpuMemory = minimumGpuMemory; - } - - public int getChunkSize() { - return chunkSize; - } - - public void setChunkSize(int chunkSize) { - this.chunkSize = chunkSize; - } - - public int getDispatchOrder() { - return dispatchOrder; - } - - public void setDispatchOrder(int dispatchOrder) { - this.dispatchOrder = dispatchOrder; - } - - public int getTotalFrameCount() { - return totalFrameCount; - } + public String command; +
public String range; + public LayerType type; + public int minimumCores; + public int maximumCores; + public int minimumGpus; + public int maximumGpus; + public boolean isThreadable; + public long minimumMemory; + public long minimumGpuMemory; + public int chunkSize; + public int timeout; + public int timeout_llu; + public int dispatchOrder; + public int totalFrameCount; + + public Set<String> tags = new LinkedHashSet<String>(); + public Set<String> services = new LinkedHashSet<String>(); + public Set<String> limits = new LinkedHashSet<String>(); + public Set<String> outputs = new LinkedHashSet<String>(); + + /* + * + */ + public List<String> getServices() { + return new ArrayList<String>(services); + } + + public String getCommand() { + return command; + } + + public void setCommand(String command) { + this.command = command; + } + + public String getRange() { + return range; + } + + public void setRange(String range) { + this.range = range; + } + + public LayerType getType() { + return type; + } + + public void setType(LayerType type) { + this.type = type; + } + + public int getMinimumCores() { + return minimumCores; + } + + public void setMinimumCores(int minimumCores) { + this.minimumCores = minimumCores; + } + + public boolean isThreadable() { + return isThreadable; + } + + public void setThreadable(boolean isThreadable) { + this.isThreadable = isThreadable; + } + + public int getTimeout() { + return timeout; + } + + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + public int getTimeoutLLU() { + return timeout; + } + + public void setTimeoutLLU(int timeout_llu) { + this.timeout_llu = timeout_llu; + } + + public long getMinimumMemory() { + return minimumMemory; + } + + public void setMinimumMemory(long minimumMemory) { + this.minimumMemory = minimumMemory; + } + + public int getMinimumGpus() { + return minimumGpus; + } + + public void setMinimumGpus(int minimumGpus) { + this.minimumGpus = minimumGpus; + } + + public long getMinimumGpuMemory() { + return minimumGpuMemory; + } + + public void setMinimumGpuMemory(long minimumGpuMemory) { + this.minimumGpuMemory = minimumGpuMemory; + } + + public int getChunkSize() { + return chunkSize; + } + + public void setChunkSize(int chunkSize) { + this.chunkSize = chunkSize; + } + + public int getDispatchOrder() { + return dispatchOrder; + } + + public void setDispatchOrder(int dispatchOrder) { + this.dispatchOrder = dispatchOrder; + } + + public int getTotalFrameCount() { + return totalFrameCount; + } - public void setTotalFrameCount(int totalFrameCount) { - this.totalFrameCount = totalFrameCount; - } + public void setTotalFrameCount(int totalFrameCount) { + this.totalFrameCount = totalFrameCount; + } - public Set<String> getTags() { - return tags; - } + public Set<String> getTags() { + return tags; + } - public void setTags(Set<String> tags) { - this.tags = tags; - } + public void setTags(Set<String> tags) { + this.tags = tags; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java b/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java index 34f26cb5b..b8f0d9fee 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerEntity.java @@ -17,31 +17,31 @@ public class LayerEntity extends Entity implements LayerInterface { - public String showId; - public String facilityId; - public String jobId; + public String showId; + public String facilityId; + public String jobId; - public LayerEntity() {} + public LayerEntity() {} - public LayerEntity(String id) { - this.id = id; - } + public LayerEntity(String id) { + this.id = id; + } - public String
getLayerId() { - return id; - } + public String getLayerId() { + return id; + } - public String getJobId() { - return jobId; - } + public String getJobId() { + return jobId; + } - @Override - public String getShowId() { - return showId; - } + @Override + public String getShowId() { + return showId; + } - @Override - public String getFacilityId() { - return facilityId; - } + @Override + public String getFacilityId() { + return facilityId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java b/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java index f53b1810e..19bb5f060 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerInterface.java @@ -17,6 +17,6 @@ public interface LayerInterface extends JobInterface { - public String getLayerId(); + public String getLayerId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java b/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java index 3edb78e4a..d51cb0dec 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerStats.java @@ -23,142 +23,143 @@ public class LayerStats { - private LayerDetail detail; - private FrameStateTotals frameStateTotals; - private ExecutionSummary executionSummary; - private List<ThreadStats> threadStats; - private List<String> outputs; + private LayerDetail detail; + private FrameStateTotals frameStateTotals; + private ExecutionSummary executionSummary; + private List<ThreadStats> threadStats; + private List<String> outputs; + + private String graphUnits; + private float conversionUnits; + private int scale; + + public List<String> getOutputs() { + return outputs; + } + + public void setOutputs(List<String> outputs) { + + List<String> newOutputs = new ArrayList<String>(outputs.size()); + for (String output : outputs) { + newOutputs.add(new File(output).getParent() + "/*"); + } + this.outputs = newOutputs; + } + + public List<ThreadStats> getThreadStats() { + return threadStats; + } + + public void setThreadStats(List<ThreadStats> threadStats) { + this.threadStats = threadStats; + setGraphScaleValues(); + } + + public LayerDetail getDetail() { + return detail; + } + + public void setDetail(LayerDetail detail) { + this.detail = detail; + } + + public FrameStateTotals getFrameStateTotals() { + return frameStateTotals; + } + + public void setFrameStateTotals(FrameStateTotals frameStateTotals) { + this.frameStateTotals = frameStateTotals; + } + + public ExecutionSummary getExecutionSummary() { + return executionSummary; + } + + public void setExecutionSummary(ExecutionSummary executionSummary) { + this.executionSummary = executionSummary; + } + + public int getGraphScale() { + return scale; + } + + public String getGraphUnits() { + return graphUnits; + } - private String graphUnits; - private float conversionUnits; - private int scale; + public String getFormattedHighMemory() { + return String.format(Locale.ROOT, "%.1fGB", + executionSummary.highMemoryKb / 1024.0 / 1024.0); + } + + public String getFormattedProcHours() { + return String.format(Locale.ROOT, "%.1f", executionSummary.coreTime / 3600.0); + } + + public int getFailedFrames() { + return frameStateTotals.waiting + frameStateTotals.dead + frameStateTotals.eaten; + } - public List<String> getOutputs() { - return outputs; - } - - public void setOutputs(List<String> outputs) { - - List<String> newOutputs = new ArrayList<String>(outputs.size()); - for (String output : outputs) { - newOutputs.add(new File(output).getParent() + "/*"); - } - this.outputs = newOutputs; - } - - public List<ThreadStats> getThreadStats()
{ - return threadStats; - } - - public void setThreadStats(List<ThreadStats> threadStats) { - this.threadStats = threadStats; - setGraphScaleValues(); - } - - public LayerDetail getDetail() { - return detail; - } - - public void setDetail(LayerDetail detail) { - this.detail = detail; - } - - public FrameStateTotals getFrameStateTotals() { - return frameStateTotals; - } - - public void setFrameStateTotals(FrameStateTotals frameStateTotals) { - this.frameStateTotals = frameStateTotals; - } - - public ExecutionSummary getExecutionSummary() { - return executionSummary; - } - - public void setExecutionSummary(ExecutionSummary executionSummary) { - this.executionSummary = executionSummary; - } - - public int getGraphScale() { - return scale; - } - - public String getGraphUnits() { - return graphUnits; - } - - public String getFormattedHighMemory() { - return String.format(Locale.ROOT, "%.1fGB", executionSummary.highMemoryKb / 1024.0 / 1024.0); - } - - public String getFormattedProcHours() { - return String.format(Locale.ROOT, "%.1f", executionSummary.coreTime / 3600.0); - } - - public int getFailedFrames() { - return frameStateTotals.waiting + frameStateTotals.dead + frameStateTotals.eaten; - } - - public String getGraphLegend() { - StringBuilder sb = new StringBuilder(128); - List<ThreadStats> reversed = new ArrayList<ThreadStats>(threadStats); - Collections.reverse(reversed); - for (ThreadStats t : reversed) { - sb.append("|"); - sb.append(t.getThreads()); - sb.append("+"); - sb.append("Thread "); - } - return sb.toString(); - } - - public String getGraphData() { - - StringBuilder sb = new StringBuilder(128); - - for (ThreadStats t : threadStats) { - sb.append(String.format(Locale.ROOT, "%.2f", t.getAvgFrameTime() / conversionUnits)); - sb.append(","); - } - if (sb.length() > 1) { - sb.deleteCharAt(sb.length() - 1); - } - return sb.toString(); - } - - public int getThreadAvgCount() { - return threadStats.size(); - } - - /** - * Since frame times vary wildly, anywhere from 1 second to 7 days, this method will set some - * values so average frame times are displayed in units that make them easy to compare. - * - * Based on the highest average frame time per thread group, average frame can be displayed in - * minutes, seconds, or hours.
- * - */ - private void setGraphScaleValues() { - - int hightestAverageSec = 0; - for (ThreadStats t : threadStats) { - if (t.getAvgFrameTime() >= hightestAverageSec) { - hightestAverageSec = t.getAvgFrameTime(); - } - } - - if (hightestAverageSec < 60) { - graphUnits = "Seconds"; - scale = ((hightestAverageSec / 2 + 1) * 2); - conversionUnits = 1f; - } else if (hightestAverageSec < 3600) { - graphUnits = "Minutes"; - scale = ((hightestAverageSec / 60) + 1); - conversionUnits = 60f; - } else { - graphUnits = "Hours"; - scale = ((hightestAverageSec / 3600) + 1); - conversionUnits = 3600f; - } - } + public String getGraphLegend() { + StringBuilder sb = new StringBuilder(128); + List<ThreadStats> reversed = new ArrayList<ThreadStats>(threadStats); + Collections.reverse(reversed); + for (ThreadStats t : reversed) { + sb.append("|"); + sb.append(t.getThreads()); + sb.append("+"); + sb.append("Thread "); + } + return sb.toString(); + } + + public String getGraphData() { + + StringBuilder sb = new StringBuilder(128); + + for (ThreadStats t : threadStats) { + sb.append(String.format(Locale.ROOT, "%.2f", t.getAvgFrameTime() / conversionUnits)); + sb.append(","); + } + if (sb.length() > 1) { + sb.deleteCharAt(sb.length() - 1); + } + return sb.toString(); + } + + public int getThreadAvgCount() { + return threadStats.size(); + } + + /** + * Since frame times vary wildly, anywhere from 1 second to 7 days, this method will set some + * values so average frame times are displayed in units that make them easy to compare. + * + * Based on the highest average frame time per thread group, average frame can be displayed in + * minutes, seconds, or hours. + * + */ + private void setGraphScaleValues() { + + int hightestAverageSec = 0; + for (ThreadStats t : threadStats) { + if (t.getAvgFrameTime() >= hightestAverageSec) { + hightestAverageSec = t.getAvgFrameTime(); + } + } + + if (hightestAverageSec < 60) { + graphUnits = "Seconds"; + scale = ((hightestAverageSec / 2 + 1) * 2); + conversionUnits = 1f; + } else if (hightestAverageSec < 3600) { + graphUnits = "Minutes"; + scale = ((hightestAverageSec / 60) + 1); + conversionUnits = 60f; + } else { + graphUnits = "Hours"; + scale = ((hightestAverageSec / 3600) + 1); + conversionUnits = 3600f; + } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java b/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java index 87fda00d3..49a2d94c3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LightweightDependency.java @@ -20,27 +20,27 @@ public class LightweightDependency extends Entity implements DependInterface { - public DependType type; - public DependTarget target; + public DependType type; + public DependTarget target; - public String parent = null; + public String parent = null; - public String dependErJobId; - public String dependErLayerId; - public String dependErFrameId; + public String dependErJobId; + public String dependErLayerId; + public String dependErFrameId; - public String dependOnJobId; - public String dependOnLayerId; - public String dependOnFrameId; + public String dependOnJobId; + public String dependOnLayerId; + public String dependOnFrameId; - public boolean anyFrame; - public boolean active; + public boolean anyFrame; + public boolean active; - public String getName() { - return type.toString() + "/" + dependErJobId; - } + public String getName() { + return type.toString() + "/" + dependErJobId; + } - public String toString() { - return
String.format("%s/%s", type.toString(), getId()); - } + public String toString() { + return String.format("%s/%s", type.toString(), getId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java b/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java index 894d5542d..a273e878a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LimitEntity.java @@ -19,19 +19,19 @@ public class LimitEntity extends Entity implements LimitInterface { - public int maxValue; - public int currentRunning; + public int maxValue; + public int currentRunning; - public LimitEntity() {} + public LimitEntity() {} - public LimitEntity(Limit grpcLimit) { - this.id = grpcLimit.getId(); - this.name = grpcLimit.getName(); - this.maxValue = grpcLimit.getMaxValue(); - this.currentRunning = grpcLimit.getCurrentRunning(); - } + public LimitEntity(Limit grpcLimit) { + this.id = grpcLimit.getId(); + this.name = grpcLimit.getName(); + this.maxValue = grpcLimit.getMaxValue(); + this.currentRunning = grpcLimit.getCurrentRunning(); + } - public String getLimitId() { - return id; - } + public String getLimitId() { + return id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java b/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java index 1f7a39f2d..d9f67bad9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LimitInterface.java @@ -17,6 +17,6 @@ public interface LimitInterface extends EntityInterface { - public String getLimitId(); + public String getLimitId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java b/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java index 9eee2340f..e89632cbe 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java @@ -29,198 +29,198 @@ */ public class LocalHostAssignment extends Entity implements ResourceContainer { - private static final Logger logger = LogManager.getLogger(LocalHostAssignment.class); - - private int idleCoreUnits; - private long idleMemory; - private int idleGpuUnits; - private long idleGpuMemory; - - private long maxMemory; - private long maxGpuMemory; - private int maxCoreUnits; - private int maxGpuUnits; - - private int threads; - - private String hostId; - private String jobId = null; - private String layerId = null; - private String frameId = null; - - private RenderPartitionType type; - - public LocalHostAssignment() {} - - public LocalHostAssignment(int maxCores, int threads, long maxMemory, int maxGpus, - long maxGpuMemory) { - this.maxCoreUnits = maxCores; - this.threads = threads; - this.maxMemory = maxMemory; - this.maxGpuUnits = maxGpus; - this.maxGpuMemory = maxGpuMemory; - } - - public int handleNegativeCoresRequirement(int requestedCores) { - // If we request a <=0 amount of cores, return positive core count. - // Request -2 on a 24 core machine will return 22. - - if (requestedCores > 0) { - // Do not process positive core requests. - logger.debug("Requested " + requestedCores + " cores."); - return requestedCores; - } - if (requestedCores <= 0 && idleCoreUnits < threads) { - // If request is negative but cores are already used, return 0. - // We don't want to overbook the host. 
- logger.debug("Requested " + requestedCores - + " cores, but the host is busy and cannot book more jobs."); - return 0; - } - // Book all cores minus the request - int totalCores = idleCoreUnits + requestedCores; - logger.debug("Requested " + requestedCores + " cores <= 0, " + idleCoreUnits - + " cores are free, booking " + totalCores + " cores"); - return totalCores; - } - - @Override - public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, - long minGpuMemory) { - minCores = handleNegativeCoresRequirement(minCores); - if (idleCoreUnits < minCores) { - return false; - } - if (minCores <= 0) { - return false; - } else if (idleMemory < minMemory) { - return false; - } else if (idleGpuUnits < minGpus) { - return false; - } else if (idleGpuMemory < minGpuMemory) { - return false; - } - - return true; - } - - @Override - public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { - idleCoreUnits = idleCoreUnits - coreUnits; - idleMemory = idleMemory - memory; - idleGpuUnits = idleGpuUnits - gpuUnits; - idleGpuMemory = idleGpuMemory - gpuMemory; - } - - public int getThreads() { - return threads; - } - - public void setThreads(int threads) { - this.threads = threads; - } - - public long getMaxMemory() { - return maxMemory; - } - - public void setMaxMemory(long maxMemory) { - this.maxMemory = maxMemory; - } - - public int getMaxCoreUnits() { - return maxCoreUnits; - } - - public void setMaxCoreUnits(int maxCoreUnits) { - this.maxCoreUnits = maxCoreUnits; - } - - public long getIdleMemory() { - return this.idleMemory; - } - - public int getMaxGpuUnits() { - return maxGpuUnits; - } - - public void setMaxGpuUnits(int maxGpuUnits) { - this.maxGpuUnits = maxGpuUnits; - } - - public long getMaxGpuMemory() { - return maxGpuMemory; - } - - public void setMaxGpuMemory(long maxGpuMemory) { - this.maxGpuMemory = maxGpuMemory; - } - - public long getIdleGpuMemory() { - return this.idleGpuMemory; - } - - public int getIdleCoreUnits() { - return this.idleCoreUnits; - } - - public void setIdleCoreUnits(int idleCoreUnits) { - this.idleCoreUnits = idleCoreUnits; - } - - public void setIdleMemory(long idleMemory) { - this.idleMemory = idleMemory; - } - - public int getIdleGpuUnits() { - return this.idleGpuUnits; - } - - public void setIdleGpuUnits(int idleGpuUnits) { - this.idleGpuUnits = idleGpuUnits; - } - - public void setIdleGpuMemory(long idleGpuMemory) { - this.idleGpuMemory = idleGpuMemory; - } - - public String getHostId() { - return hostId; - } - - public void setHostId(String hostId) { - this.hostId = hostId; - } - - public String getJobId() { - return jobId; - } - - public void setJobId(String jobId) { - this.jobId = jobId; - } + private static final Logger logger = LogManager.getLogger(LocalHostAssignment.class); - public String getLayerId() { - return layerId; - } - - public void setLayerId(String layerId) { - this.layerId = layerId; - } - - public String getFrameId() { - return frameId; - } + private int idleCoreUnits; + private long idleMemory; + private int idleGpuUnits; + private long idleGpuMemory; - public void setFrameId(String frameId) { - this.frameId = frameId; - } + private long maxMemory; + private long maxGpuMemory; + private int maxCoreUnits; + private int maxGpuUnits; - public RenderPartitionType getType() { - return type; - } + private int threads; - public void setType(RenderPartitionType type) { - this.type = type; - } + private String hostId; + private String jobId = null; + private String layerId = null; + private String frameId 
= null; + + private RenderPartitionType type; + + public LocalHostAssignment() {} + + public LocalHostAssignment(int maxCores, int threads, long maxMemory, int maxGpus, + long maxGpuMemory) { + this.maxCoreUnits = maxCores; + this.threads = threads; + this.maxMemory = maxMemory; + this.maxGpuUnits = maxGpus; + this.maxGpuMemory = maxGpuMemory; + } + + public int handleNegativeCoresRequirement(int requestedCores) { + // If we request a <=0 amount of cores, return positive core count. + // Request -2 on a 24 core machine will return 22. + + if (requestedCores > 0) { + // Do not process positive core requests. + logger.debug("Requested " + requestedCores + " cores."); + return requestedCores; + } + if (requestedCores <= 0 && idleCoreUnits < threads) { + // If request is negative but cores are already used, return 0. + // We don't want to overbook the host. + logger.debug("Requested " + requestedCores + + " cores, but the host is busy and cannot book more jobs."); + return 0; + } + // Book all cores minus the request + int totalCores = idleCoreUnits + requestedCores; + logger.debug("Requested " + requestedCores + " cores <= 0, " + idleCoreUnits + + " cores are free, booking " + totalCores + " cores"); + return totalCores; + } + + @Override + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, + long minGpuMemory) { + minCores = handleNegativeCoresRequirement(minCores); + if (idleCoreUnits < minCores) { + return false; + } + if (minCores <= 0) { + return false; + } else if (idleMemory < minMemory) { + return false; + } else if (idleGpuUnits < minGpus) { + return false; + } else if (idleGpuMemory < minGpuMemory) { + return false; + } + + return true; + } + + @Override + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { + idleCoreUnits = idleCoreUnits - coreUnits; + idleMemory = idleMemory - memory; + idleGpuUnits = idleGpuUnits - gpuUnits; + idleGpuMemory = idleGpuMemory - gpuMemory; + } + + public int getThreads() { + return threads; + } + + public void setThreads(int threads) { + this.threads = threads; + } + + public long getMaxMemory() { + return maxMemory; + } + + public void setMaxMemory(long maxMemory) { + this.maxMemory = maxMemory; + } + + public int getMaxCoreUnits() { + return maxCoreUnits; + } + + public void setMaxCoreUnits(int maxCoreUnits) { + this.maxCoreUnits = maxCoreUnits; + } + + public long getIdleMemory() { + return this.idleMemory; + } + + public int getMaxGpuUnits() { + return maxGpuUnits; + } + + public void setMaxGpuUnits(int maxGpuUnits) { + this.maxGpuUnits = maxGpuUnits; + } + + public long getMaxGpuMemory() { + return maxGpuMemory; + } + + public void setMaxGpuMemory(long maxGpuMemory) { + this.maxGpuMemory = maxGpuMemory; + } + + public long getIdleGpuMemory() { + return this.idleGpuMemory; + } + + public int getIdleCoreUnits() { + return this.idleCoreUnits; + } + + public void setIdleCoreUnits(int idleCoreUnits) { + this.idleCoreUnits = idleCoreUnits; + } + + public void setIdleMemory(long idleMemory) { + this.idleMemory = idleMemory; + } + + public int getIdleGpuUnits() { + return this.idleGpuUnits; + } + + public void setIdleGpuUnits(int idleGpuUnits) { + this.idleGpuUnits = idleGpuUnits; + } + + public void setIdleGpuMemory(long idleGpuMemory) { + this.idleGpuMemory = idleGpuMemory; + } + + public String getHostId() { + return hostId; + } + + public void setHostId(String hostId) { + this.hostId = hostId; + } + + public String getJobId() { + return jobId; + } + + public void setJobId(String jobId) { 
+ this.jobId = jobId; + } + + public String getLayerId() { + return layerId; + } + + public void setLayerId(String layerId) { + this.layerId = layerId; + } + + public String getFrameId() { + return frameId; + } + + public void setFrameId(String frameId) { + this.frameId = frameId; + } + + public RenderPartitionType getType() { + return type; + } + + public void setType(RenderPartitionType type) { + this.type = type; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java b/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java index f46378c8f..90be9ca29 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MaintenanceTask.java @@ -21,28 +21,28 @@ */ public enum MaintenanceTask { - /** - * Lock the transfer of jobs to the historical table - */ - LOCK_HISTORICAL_TRANSFER, - - /** - * Lock the hardware start check - */ - LOCK_HARDWARE_STATE_CHECK, - - /** - * Lock the orphaned proc check - */ - LOCK_ORPHANED_PROC_CHECK, - - /** - * Lock for task updates - */ - LOCK_TASK_UPDATE, - - /** - * Lock the stale checkpoint task. - */ - LOCK_STALE_CHECKPOINT + /** + * Lock the transfer of jobs to the historical table + */ + LOCK_HISTORICAL_TRANSFER, + + /** + * Lock the hardware start check + */ + LOCK_HARDWARE_STATE_CHECK, + + /** + * Lock the orphaned proc check + */ + LOCK_ORPHANED_PROC_CHECK, + + /** + * Lock for task updates + */ + LOCK_TASK_UPDATE, + + /** + * Lock the stale checkpoint task. + */ + LOCK_STALE_CHECKPOINT } diff --git a/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java b/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java index f8072e090..e1de9a26d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MatcherEntity.java @@ -21,39 +21,39 @@ public class MatcherEntity extends Entity implements MatcherInterface { - public MatchSubject subject; - public MatchType type; - public String value; + public MatchSubject subject; + public MatchType type; + public String value; - public String filterId; - public String showId; + public String filterId; + public String showId; - public static MatcherEntity build(FilterInterface filter, Matcher data) { + public static MatcherEntity build(FilterInterface filter, Matcher data) { - MatcherEntity detail = new MatcherEntity(); - detail.name = null; - detail.subject = data.getSubject(); - detail.type = data.getType(); - detail.value = data.getInput(); + MatcherEntity detail = new MatcherEntity(); + detail.name = null; + detail.subject = data.getSubject(); + detail.type = data.getType(); + detail.value = data.getInput(); - return detail; - } + return detail; + } - public static MatcherEntity build(FilterInterface filter, Matcher data, String id) { - MatcherEntity detail = build(filter, data); - detail.id = id.toString(); - return detail; - } + public static MatcherEntity build(FilterInterface filter, Matcher data, String id) { + MatcherEntity detail = build(filter, data); + detail.id = id.toString(); + return detail; + } - public String getFilterId() { - return filterId; - } + public String getFilterId() { + return filterId; + } - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } - public String getMatcherId() { - return this.id; - } + public String getMatcherId() { + return this.id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java 
b/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java index a1d0c042f..e9104a793 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MatcherInterface.java @@ -17,6 +17,6 @@ public interface MatcherInterface extends FilterInterface { - public String getMatcherId(); + public String getMatcherId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java b/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java index dd6fb7399..b6cb2dec3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/MinimalHost.java @@ -17,30 +17,30 @@ public class MinimalHost extends Entity implements HostInterface { - public String allocId; - public String facilityId; + public String allocId; + public String facilityId; - public MinimalHost() { - // TODO Auto-generated constructor stub - } + public MinimalHost() { + // TODO Auto-generated constructor stub + } - public String getHostId() { - return this.id; - } + public String getHostId() { + return this.id; + } - public String getAllocationId() { - return allocId; - } + public String getAllocationId() { + return allocId; + } - public String getId() { - return id; - } + public String getId() { + return id; + } - public String getName() { - return name; - } + public String getName() { + return name; + } - public String getFacilityId() { - return facilityId; - } + public String getFacilityId() { + return facilityId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java b/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java index 9e2434e5c..5dd70f06e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/OwnerEntity.java @@ -17,10 +17,10 @@ public class OwnerEntity extends Entity { - public OwnerEntity() {} + public OwnerEntity() {} - public OwnerEntity(String name) { - this.name = name; - } + public OwnerEntity(String name) { + this.name = name; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java b/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java index 01efc9716..60bbfd9ef 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/PointDetail.java @@ -17,25 +17,25 @@ public class PointDetail extends Entity implements PointInterface { - public String showId; - public String deptId; - - public int cores = 0; - public String tiTask = ""; - public boolean tiManaged = false; - - @Override - public String getDepartmentId() { - return deptId; - } - - @Override - public String getShowId() { - return showId; - } - - @Override - public String getPointId() { - return id; - } + public String showId; + public String deptId; + + public int cores = 0; + public String tiTask = ""; + public boolean tiManaged = false; + + @Override + public String getDepartmentId() { + return deptId; + } + + @Override + public String getShowId() { + return showId; + } + + @Override + public String getPointId() { + return id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java b/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java index f1bce6e61..b6f549394 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/PointInterface.java @@ -16,5 +16,5 @@ package com.imageworks.spcue; public interface PointInterface extends DepartmentInterface, 
ShowInterface { - public String getPointId(); + public String getPointId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java b/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java index cbc45a8dd..f9696ee5b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ProcInterface.java @@ -17,6 +17,6 @@ public interface ProcInterface extends HostInterface, FrameInterface { - String getProcId(); + String getProcId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java b/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java index 624b2c93c..3f12c1352 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java +++ b/cuebot/src/main/java/com/imageworks/spcue/PrometheusMetricsCollector.java @@ -18,260 +18,271 @@ */ @Component public class PrometheusMetricsCollector { - private BookingQueue bookingQueue; - - private DispatchQueue manageQueue; - - private DispatchQueue dispatchQueue; - - private HostReportQueue reportQueue; - - private boolean enabled; - - // BookingQueue bookingQueue - private static final Gauge bookingWaitingTotal = Gauge.build().name("cue_booking_waiting_total") - .help("Booking Queue number of waiting tasks").labelNames("env", "cuebot_hosts").register(); - private static final Gauge bookingRemainingCapacityTotal = - Gauge.build().name("cue_booking_remaining_capacity_total") - .help("Booking Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); - private static final Gauge bookingThreadsTotal = Gauge.build().name("cue_booking_threads_total") - .help("Booking Queue number of active threads").labelNames("env", "cuebot_hosts").register(); - private static final Gauge bookingExecutedTotal = Gauge.build().name("cue_booking_executed_total") - .help("Booking Queue number of executed tasks").labelNames("env", "cuebot_hosts").register(); - private static final Gauge bookingRejectedTotal = Gauge.build().name("cue_booking_rejected_total") - .help("Booking Queue number of rejected tasks").labelNames("env", "cuebot_hosts").register(); - - // DispatchQueue manageQueue - private static final Gauge manageWaitingTotal = Gauge.build().name("cue_manage_waiting_total") - .help("Manage Queue number of waiting tasks").labelNames("env", "cuebot_hosts").register(); - private static final Gauge manageRemainingCapacityTotal = - Gauge.build().name("cue_manage_remaining_capacity_total") - .help("Manage Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); - private static final Gauge manageThreadsTotal = Gauge.build().name("cue_manage_threads_total") - .help("Manage Queue number of active threads").labelNames("env", "cuebot_hosts").register(); - private static final Gauge manageExecutedTotal = Gauge.build().name("cue_manage_executed_total") - .help("Manage Queue number of executed tasks").labelNames("env", "cuebot_hosts").register(); - private static final Gauge manageRejectedTotal = Gauge.build().name("cue_manage_rejected_total") - .help("Manage Queue number of rejected tasks").labelNames("env", "cuebot_hosts").register(); - - // DispatchQueue dispatchQueue - private static final Gauge dispatchWaitingTotal = Gauge.build().name("cue_dispatch_waiting_total") - .help("Dispatch Queue number of waiting tasks").labelNames("env", "cuebot_hosts").register(); - private static final Gauge dispatchRemainingCapacityTotal = - Gauge.build().name("cue_dispatch_remaining_capacity_total") - .help("Dispatch 
Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); - private static final Gauge dispatchThreadsTotal = Gauge.build().name("cue_dispatch_threads_total") - .help("Dispatch Queue number of active threads").labelNames("env", "cuebot_hosts").register(); - private static final Gauge dispatchExecutedTotal = Gauge.build() - .name("cue_dispatch_executed_total").help("Dispatch Queue number of executed tasks") - .labelNames("env", "cuebot_hosts").register(); - private static final Gauge dispatchRejectedTotal = Gauge.build() - .name("cue_dispatch_rejected_total").help("Dispatch Queue number of rejected tasks") - .labelNames("env", "cuebot_hosts").register(); - - // HostReportQueue reportQueue - private static final Gauge reportQueueWaitingTotal = - Gauge.build().name("cue_report_waiting_total").help("Report Queue number of waiting tasks") - .labelNames("env", "cuebot_hosts").register(); - private static final Gauge reportQueueRemainingCapacityTotal = - Gauge.build().name("cue_report_remaining_capacity_total") - .help("Report Queue remaining capacity").labelNames("env", "cuebot_hosts").register(); - private static final Gauge reportQueueThreadsTotal = - Gauge.build().name("cue_report_threads_total").help("Report Queue number of active threads") - .labelNames("env", "cuebot_hosts").register(); - private static final Gauge reportQueueExecutedTotal = - Gauge.build().name("cue_report_executed_total").help("Report Queue number of executed tasks") - .labelNames("env", "cuebot_hosts").register(); - private static final Gauge reportQueueRejectedTotal = - Gauge.build().name("cue_report_rejected_total").help("Report Queue number of rejected tasks") - .labelNames("env", "cuebot_hosts").register(); - - private static final Counter findJobsByShowQueryCountMetric = - Counter.build().name("cue_find_jobs_by_show_count") - .help("Count the occurrences of the query FIND_JOBS_BY_SHOW.") - .labelNames("env", "cuebot_hosts").register(); - private static final Gauge bookingDurationMillisMetric = - Gauge.build().name("cue_booking_durations_in_millis") - .help("Register duration of booking steps in milliseconds.") - .labelNames("env", "cuebot_host", "stage_desc").register(); - private static final Histogram bookingDurationMillisHistogramMetric = - Histogram.build().name("cue_booking_durations_histogram_in_millis") - .help("Register a summary of duration of booking steps in milliseconds.") - .labelNames("env", "cuebot_host", "stage_desc").register(); - - private static final Counter frameKilledCounter = Counter.build().name("cue_frame_killed_counter") - .help("Number of frames kill requests processed") - .labelNames("env", "cuebot_host", "render_node", "cause").register(); - - private static final Counter frameKillFailureCounter = - Counter.build().name("cue_frame_kill_failure_counter") - .help("Number of frames that failed to be killed after FRAME_KILL_RETRY_LIMIT tries") - .labelNames("env", "cuebot_host", "render_node", "job_name", "frame_name", "frame_id") - .register(); - - private String deployment_environment; - private String cuebot_host; - - @Autowired - public PrometheusMetricsCollector(Environment env) { - if (env == null) { - throw new SpcueRuntimeException("Env not defined"); + private BookingQueue bookingQueue; + + private DispatchQueue manageQueue; + + private DispatchQueue dispatchQueue; + + private HostReportQueue reportQueue; + + private boolean enabled; + + // BookingQueue bookingQueue + private static final Gauge bookingWaitingTotal = Gauge.build().name("cue_booking_waiting_total") + 
.help("Booking Queue number of waiting tasks").labelNames("env", "cuebot_hosts") + .register(); + private static final Gauge bookingRemainingCapacityTotal = Gauge.build() + .name("cue_booking_remaining_capacity_total").help("Booking Queue remaining capacity") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingThreadsTotal = Gauge.build().name("cue_booking_threads_total") + .help("Booking Queue number of active threads").labelNames("env", "cuebot_hosts") + .register(); + private static final Gauge bookingExecutedTotal = Gauge.build() + .name("cue_booking_executed_total").help("Booking Queue number of executed tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingRejectedTotal = Gauge.build() + .name("cue_booking_rejected_total").help("Booking Queue number of rejected tasks") + .labelNames("env", "cuebot_hosts").register(); + + // DispatchQueue manageQueue + private static final Gauge manageWaitingTotal = Gauge.build().name("cue_manage_waiting_total") + .help("Manage Queue number of waiting tasks").labelNames("env", "cuebot_hosts") + .register(); + private static final Gauge manageRemainingCapacityTotal = Gauge.build() + .name("cue_manage_remaining_capacity_total").help("Manage Queue remaining capacity") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge manageThreadsTotal = Gauge.build().name("cue_manage_threads_total") + .help("Manage Queue number of active threads").labelNames("env", "cuebot_hosts") + .register(); + private static final Gauge manageExecutedTotal = Gauge.build().name("cue_manage_executed_total") + .help("Manage Queue number of executed tasks").labelNames("env", "cuebot_hosts") + .register(); + private static final Gauge manageRejectedTotal = Gauge.build().name("cue_manage_rejected_total") + .help("Manage Queue number of rejected tasks").labelNames("env", "cuebot_hosts") + .register(); + + // DispatchQueue dispatchQueue + private static final Gauge dispatchWaitingTotal = Gauge.build() + .name("cue_dispatch_waiting_total").help("Dispatch Queue number of waiting tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchRemainingCapacityTotal = Gauge.build() + .name("cue_dispatch_remaining_capacity_total").help("Dispatch Queue remaining capacity") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchThreadsTotal = Gauge.build() + .name("cue_dispatch_threads_total").help("Dispatch Queue number of active threads") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchExecutedTotal = Gauge.build() + .name("cue_dispatch_executed_total").help("Dispatch Queue number of executed tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge dispatchRejectedTotal = Gauge.build() + .name("cue_dispatch_rejected_total").help("Dispatch Queue number of rejected tasks") + .labelNames("env", "cuebot_hosts").register(); + + // HostReportQueue reportQueue + private static final Gauge reportQueueWaitingTotal = Gauge.build() + .name("cue_report_waiting_total").help("Report Queue number of waiting tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueRemainingCapacityTotal = Gauge.build() + .name("cue_report_remaining_capacity_total").help("Report Queue remaining capacity") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueThreadsTotal = Gauge.build() + 
.name("cue_report_threads_total").help("Report Queue number of active threads") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueExecutedTotal = Gauge.build() + .name("cue_report_executed_total").help("Report Queue number of executed tasks") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge reportQueueRejectedTotal = Gauge.build() + .name("cue_report_rejected_total").help("Report Queue number of rejected tasks") + .labelNames("env", "cuebot_hosts").register(); + + private static final Counter findJobsByShowQueryCountMetric = + Counter.build().name("cue_find_jobs_by_show_count") + .help("Count the occurrences of the query FIND_JOBS_BY_SHOW.") + .labelNames("env", "cuebot_hosts").register(); + private static final Gauge bookingDurationMillisMetric = + Gauge.build().name("cue_booking_durations_in_millis") + .help("Register duration of booking steps in milliseconds.") + .labelNames("env", "cuebot_host", "stage_desc").register(); + private static final Histogram bookingDurationMillisHistogramMetric = + Histogram.build().name("cue_booking_durations_histogram_in_millis") + .help("Register a summary of duration of booking steps in milliseconds.") + .labelNames("env", "cuebot_host", "stage_desc").register(); + + private static final Counter frameKilledCounter = Counter.build() + .name("cue_frame_killed_counter").help("Number of frames kill requests processed") + .labelNames("env", "cuebot_host", "render_node", "cause").register(); + + private static final Counter frameKillFailureCounter = Counter.build() + .name("cue_frame_kill_failure_counter") + .help("Number of frames that failed to be killed after FRAME_KILL_RETRY_LIMIT tries") + .labelNames("env", "cuebot_host", "render_node", "job_name", "frame_name", "frame_id") + .register(); + + private String deployment_environment; + private String cuebot_host; + + @Autowired + public PrometheusMetricsCollector(Environment env) { + if (env == null) { + throw new SpcueRuntimeException("Env not defined"); + } + this.enabled = env.getProperty("metrics.prometheus.collector", Boolean.class, false); + String envKey = env.getProperty("metrics.prometheus.environment_id.environment_variable", + String.class, "DEPLOYMENT_ENVIRONMENT"); + + this.cuebot_host = getHostNameFromEnv(); + // Get environment id from environment variable + this.deployment_environment = System.getenv(envKey); + if (this.deployment_environment == null) { + this.deployment_environment = "undefined"; + } } - this.enabled = env.getProperty("metrics.prometheus.collector", Boolean.class, false); - String envKey = env.getProperty("metrics.prometheus.environment_id.environment_variable", - String.class, "DEPLOYMENT_ENVIRONMENT"); - - this.cuebot_host = getHostNameFromEnv(); - // Get environment id from environment variable - this.deployment_environment = System.getenv(envKey); - if (this.deployment_environment == null) { - this.deployment_environment = "undefined"; + + /** + * Get hostname from environment variable + * + * Uses the following fallback order: + * + * - NODE_HOSTNAME -> HOSTNAME -> HOST -> "undefined" + * + * @return + */ + private String getHostNameFromEnv() { + String hostname = System.getenv("NODE_HOSTNAME"); + if (hostname != null) { + return hostname; + } + + hostname = System.getenv("HOSTNAME"); + if (hostname != null) { + return hostname; + } + + hostname = System.getenv("HOST"); + if (hostname != null) { + return hostname; + } + + return "undefined"; + } + + /** + * Collect metrics from queues + */ + public 
void collectPrometheusMetrics() { + if (this.enabled) { + // BookingQueue bookingQueue + bookingWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getSize()); + bookingRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getRemainingCapacity()); + bookingThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getActiveCount()); + bookingExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getCompletedTaskCount()); + bookingRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(bookingQueue.getRejectedTaskCount()); + + // DispatchQueue manageQueue + manageWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getSize()); + manageRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getRemainingCapacity()); + manageThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getActiveCount()); + manageExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getCompletedTaskCount()); + manageRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(manageQueue.getRejectedTaskCount()); + + // DispatchQueue dispatchQueue + dispatchWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getSize()); + dispatchRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getRemainingCapacity()); + dispatchThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getActiveCount()); + dispatchExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getCompletedTaskCount()); + dispatchRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(dispatchQueue.getRejectedTaskCount()); + + // HostReportQueue reportQueue + reportQueueWaitingTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getQueue().size()); + reportQueueRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getQueue().remainingCapacity()); + reportQueueThreadsTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getActiveCount()); + reportQueueExecutedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getTaskCount()); + reportQueueRejectedTotal.labels(this.deployment_environment, this.cuebot_host) + .set(reportQueue.getRejectedTaskCount()); + } + } + + /** + * Set a new value to the cue_booking_durations_in_millis metric + * + * @param stage_desc booking stage description to be used as a tag + * @param value value to set + */ + public void setBookingDurationMetric(String stage_desc, double value) { + bookingDurationMillisMetric + .labels(this.deployment_environment, this.cuebot_host, stage_desc).set(value); + bookingDurationMillisHistogramMetric + .labels(this.deployment_environment, this.cuebot_host, stage_desc).observe(value); + } + + /** + * Increment cue_find_jobs_by_show_count metric + */ + public void incrementFindJobsByShowQueryCountMetric() { + findJobsByShowQueryCountMetric.labels(this.deployment_environment, this.cuebot_host).inc(); + } + + /** + * Increment cue_frame_killed_counter metric + * + * @param renderNode hostname of the render node receiving the kill request + * @param killCause cause assigned to the request + */ + public void 
incrementFrameKilledCounter(String renderNode, + HostReportHandler.KillCause killCause) { + frameKilledCounter + .labels(this.deployment_environment, this.cuebot_host, renderNode, killCause.name()) + .inc(); } - } - - /** - * Get hostname from environment variable - * - * Uses the following fallback order: - * - * - NODE_HOSTNAME -> HOSTNAME -> HOST -> "undefined" - * - * @return - */ - private String getHostNameFromEnv() { - String hostname = System.getenv("NODE_HOSTNAME"); - if (hostname != null) { - return hostname; + + /** + * Increment cue_frame_kill_failure_counter metric + * + * @param hostname + * @param jobName + * @param frameName + * @param frameId + */ + public void incrementFrameKillFailureCounter(String hostname, String jobName, String frameName, + String frameId) { + frameKillFailureCounter.labels(this.deployment_environment, this.cuebot_host, hostname, + jobName, frameName, frameId).inc(); + } + + // Setters used for dependency injection + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; } - hostname = System.getenv("HOSTNAME"); - if (hostname != null) { - return hostname; + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; } - hostname = System.getenv("HOST"); - if (hostname != null) { - return hostname; + public void setDispatchQueue(DispatchQueue dispatchQueue) { + this.dispatchQueue = dispatchQueue; } - return "undefined"; - } - - /** - * Collect metrics from queues - */ - public void collectPrometheusMetrics() { - if (this.enabled) { - // BookingQueue bookingQueue - bookingWaitingTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getSize()); - bookingRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getRemainingCapacity()); - bookingThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getActiveCount()); - bookingExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getCompletedTaskCount()); - bookingRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(bookingQueue.getRejectedTaskCount()); - - // DispatchQueue manageQueue - manageWaitingTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getSize()); - manageRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getRemainingCapacity()); - manageThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getActiveCount()); - manageExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getCompletedTaskCount()); - manageRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(manageQueue.getRejectedTaskCount()); - - // DispatchQueue dispatchQueue - dispatchWaitingTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getSize()); - dispatchRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getRemainingCapacity()); - dispatchThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getActiveCount()); - dispatchExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getCompletedTaskCount()); - dispatchRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(dispatchQueue.getRejectedTaskCount()); - - // HostReportQueue reportQueue - 
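The collector above only pushes queue statistics when metrics.prometheus.collector is enabled, and something must invoke collectPrometheusMetrics() periodically. A minimal sketch of such a driver, assuming Spring scheduling is enabled in the application; the class name, interval, and wiring are illustrative and not cuebot's actual configuration:

    import org.springframework.scheduling.annotation.Scheduled;
    import org.springframework.stereotype.Component;

    @Component
    public class MetricsPoller {
        private final PrometheusMetricsCollector collector;

        public MetricsPoller(PrometheusMetricsCollector collector) {
            this.collector = collector;
        }

        // Snapshot the booking/manage/dispatch/report queue gauges on a fixed cadence.
        // The 30s interval is an assumption for illustration only.
        @Scheduled(fixedRate = 30_000)
        public void poll() {
            collector.collectPrometheusMetrics();
        }
    }

Callers record booking timings the same way, e.g. collector.setBookingDurationMetric(stageDesc, elapsedMillis) updates both the gauge and the histogram registered above.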
reportQueueWaitingTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getQueue().size()); - reportQueueRemainingCapacityTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getQueue().remainingCapacity()); - reportQueueThreadsTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getActiveCount()); - reportQueueExecutedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getTaskCount()); - reportQueueRejectedTotal.labels(this.deployment_environment, this.cuebot_host) - .set(reportQueue.getRejectedTaskCount()); + public void setReportQueue(HostReportQueue reportQueue) { + this.reportQueue = reportQueue; } - } - - /** - * Set a new value to the cue_booking_durations_in_millis metric - * - * @param stage_desc booking stage description to be used as a tag - * @param value value to set - */ - public void setBookingDurationMetric(String stage_desc, double value) { - bookingDurationMillisMetric.labels(this.deployment_environment, this.cuebot_host, stage_desc) - .set(value); - bookingDurationMillisHistogramMetric - .labels(this.deployment_environment, this.cuebot_host, stage_desc).observe(value); - } - - /** - * Increment cue_find_jobs_by_show_count metric - */ - public void incrementFindJobsByShowQueryCountMetric() { - findJobsByShowQueryCountMetric.labels(this.deployment_environment, this.cuebot_host).inc(); - } - - /** - * Increment cue_frame_killed_counter metric - * - * @param renderNode hostname of the render node receiving the kill request - * @param killCause cause assigned to the request - */ - public void incrementFrameKilledCounter(String renderNode, - HostReportHandler.KillCause killCause) { - frameKilledCounter - .labels(this.deployment_environment, this.cuebot_host, renderNode, killCause.name()).inc(); - } - - /** - * Increment cue_frame_kill_failure_counter metric - * - * @param hostname - * @param jobName - * @param frameName - * @param frameId - */ - public void incrementFrameKillFailureCounter(String hostname, String jobName, String frameName, - String frameId) { - frameKillFailureCounter.labels(this.deployment_environment, this.cuebot_host, hostname, jobName, - frameName, frameId).inc(); - } - - // Setters used for dependency injection - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public void setDispatchQueue(DispatchQueue dispatchQueue) { - this.dispatchQueue = dispatchQueue; - } - - public void setReportQueue(HostReportQueue reportQueue) { - this.reportQueue = reportQueue; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/Redirect.java b/cuebot/src/main/java/com/imageworks/spcue/Redirect.java index 2ece95a58..bc776849b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Redirect.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Redirect.java @@ -26,81 +26,81 @@ */ public class Redirect implements Serializable { - private static final long serialVersionUID = -6461503320817105280L; - - /** - * Track requests to redirect multiple procs together by assigning a group id. 
- */ - private final String groupId; - private final RedirectType type; - private final String destinationId; - private final String name; - private final long creationTime; - - public static final long EXPIRE_TIME = TimeUnit.MILLISECONDS.convert(24, TimeUnit.HOURS); - - public Redirect(String groupId, RedirectType type, String destinationId, String name, - long creationTime) { - this.groupId = groupId; - this.type = type; - this.destinationId = destinationId; - this.name = name; - this.creationTime = creationTime; - } - - public Redirect(RedirectType type, String destinationId, String name) { - this.groupId = SqlUtil.genKeyRandom(); - this.type = type; - this.destinationId = destinationId; - this.name = name; - this.creationTime = System.currentTimeMillis(); - } - - public Redirect(String groupId, JobInterface job) { - this.groupId = groupId; - this.type = RedirectType.JOB_REDIRECT; - this.destinationId = job.getJobId(); - this.name = job.getName(); - this.creationTime = System.currentTimeMillis(); - } - - public Redirect(JobInterface job) { - this.groupId = SqlUtil.genKeyRandom(); - this.type = RedirectType.JOB_REDIRECT; - this.destinationId = job.getJobId(); - this.name = job.getName(); - this.creationTime = System.currentTimeMillis(); - } - - public Redirect(GroupInterface group) { - this.groupId = SqlUtil.genKeyRandom(); - this.type = RedirectType.GROUP_REDIRECT; - this.destinationId = group.getGroupId(); - this.name = group.getName(); - this.creationTime = System.currentTimeMillis(); - } - - public String getGroupId() { - return groupId; - } - - public RedirectType getType() { - return type; - } - - public String getDestinationId() { - return destinationId; - } - - public String getDestinationName() { - return name; - } - - public boolean isExpired() { - return System.currentTimeMillis() - creationTime >= EXPIRE_TIME; - } - - public long getCreationTime() { - return creationTime; - } + private static final long serialVersionUID = -6461503320817105280L; + + /** + * Track requests to redirect multiple procs together by assigning a group id. 
+ */ + private final String groupId; + private final RedirectType type; + private final String destinationId; + private final String name; + private final long creationTime; + + public static final long EXPIRE_TIME = TimeUnit.MILLISECONDS.convert(24, TimeUnit.HOURS); + + public Redirect(String groupId, RedirectType type, String destinationId, String name, + long creationTime) { + this.groupId = groupId; + this.type = type; + this.destinationId = destinationId; + this.name = name; + this.creationTime = creationTime; + } + + public Redirect(RedirectType type, String destinationId, String name) { + this.groupId = SqlUtil.genKeyRandom(); + this.type = type; + this.destinationId = destinationId; + this.name = name; + this.creationTime = System.currentTimeMillis(); + } + + public Redirect(String groupId, JobInterface job) { + this.groupId = groupId; + this.type = RedirectType.JOB_REDIRECT; + this.destinationId = job.getJobId(); + this.name = job.getName(); + this.creationTime = System.currentTimeMillis(); + } + + public Redirect(JobInterface job) { + this.groupId = SqlUtil.genKeyRandom(); + this.type = RedirectType.JOB_REDIRECT; + this.destinationId = job.getJobId(); + this.name = job.getName(); + this.creationTime = System.currentTimeMillis(); + } + + public Redirect(GroupInterface group) { + this.groupId = SqlUtil.genKeyRandom(); + this.type = RedirectType.GROUP_REDIRECT; + this.destinationId = group.getGroupId(); + this.name = group.getName(); + this.creationTime = System.currentTimeMillis(); + } + + public String getGroupId() { + return groupId; + } + + public RedirectType getType() { + return type; + } + + public String getDestinationId() { + return destinationId; + } + + public String getDestinationName() { + return name; + } + + public boolean isExpired() { + return System.currentTimeMillis() - creationTime >= EXPIRE_TIME; + } + + public long getCreationTime() { + return creationTime; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java b/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java index 26ba5c65b..278a6417f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java @@ -20,37 +20,37 @@ */ public class ResourceUsage { - private final long coreTimeSeconds; - private final long gpuTimeSeconds; - private final long clockTimeSeconds; + private final long coreTimeSeconds; + private final long gpuTimeSeconds; + private final long clockTimeSeconds; - public ResourceUsage(long clockTime, int corePoints, int gpuPoints) { + public ResourceUsage(long clockTime, int corePoints, int gpuPoints) { - if (clockTime < 1) { - clockTime = 1; - } + if (clockTime < 1) { + clockTime = 1; + } - long coreTime = (long) (clockTime * (corePoints / 100f)); - if (coreTime < 1) { - coreTime = 1; - } + long coreTime = (long) (clockTime * (corePoints / 100f)); + if (coreTime < 1) { + coreTime = 1; + } - long gpuTime = clockTime * gpuPoints; + long gpuTime = clockTime * gpuPoints; - clockTimeSeconds = clockTime; - coreTimeSeconds = coreTime; - gpuTimeSeconds = gpuTime; - } + clockTimeSeconds = clockTime; + coreTimeSeconds = coreTime; + gpuTimeSeconds = gpuTime; + } - public long getCoreTimeSeconds() { - return coreTimeSeconds; - } + public long getCoreTimeSeconds() { + return coreTimeSeconds; + } - public long getGpuTimeSeconds() { - return gpuTimeSeconds; - } + public long getGpuTimeSeconds() { + return gpuTimeSeconds; + } - public long getClockTimeSeconds() { - return clockTimeSeconds; - } + 
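A Redirect carries its creation time and is considered stale once EXPIRE_TIME (24 hours) has elapsed. A small sketch of the expiry behaviour, assuming the surrounding spcue classes are on the classpath; the destination id and name strings are placeholders:

    import java.util.concurrent.TimeUnit;

    // Fresh redirect: creationTime defaults to "now", so it is not expired.
    Redirect fresh = new Redirect(RedirectType.JOB_REDIRECT, "some-job-id", "some-job-name");
    fresh.isExpired();   // false

    // Back-dated redirect via the five-argument constructor: 48h old, past EXPIRE_TIME.
    long twoDaysAgo =
            System.currentTimeMillis() - TimeUnit.MILLISECONDS.convert(48, TimeUnit.HOURS);
    Redirect stale = new Redirect(SqlUtil.genKeyRandom(), RedirectType.JOB_REDIRECT,
            "some-job-id", "some-job-name", twoDaysAgo);
    stale.isExpired();   // true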
public long getClockTimeSeconds() { + return clockTimeSeconds; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java index 1dfea70e3..74570c605 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java @@ -20,50 +20,50 @@ import com.imageworks.spcue.dispatcher.Dispatcher; public class ServiceEntity extends Entity { - /** - * Determines if the service is threadable or not. - */ - public boolean threadable = false; - - /** - * Determines the default minimum cores per frame. - */ - public int minCores = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; - - /** - * Determines the default minimum cores per frame. 0 indicates the feature is disabled. - */ - public int maxCores = 0; - - /** - * Determines the default minimum gpus per frame. - */ - public int minGpus = 0; - - /** - * Determines the default minimum gpus per frame. 0 indicates the feature is disabled. - */ - public int maxGpus = 0; - - /** - * Determines the default minimum memory per frame. - */ - public long minMemory = Dispatcher.MEM_SERVICE_RESERVED_DEFAULT; - - /** - * Determines the default minimum gpu per frame. - */ - public long minGpuMemory = Dispatcher.MEM_SERVICE_GPU_RESERVED_DEFAULT; - - /** - * Determines the default tags. - */ - public LinkedHashSet tags = new LinkedHashSet(); - - public int timeout = 0; - - public int timeout_llu = 0; - - public long minMemoryIncrease = Dispatcher.MINIMUM_MEMORY_INCREASE; + /** + * Determines if the service is threadable or not. + */ + public boolean threadable = false; + + /** + * Determines the default minimum cores per frame. + */ + public int minCores = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + + /** + * Determines the default minimum cores per frame. 0 indicates the feature is disabled. + */ + public int maxCores = 0; + + /** + * Determines the default minimum gpus per frame. + */ + public int minGpus = 0; + + /** + * Determines the default minimum gpus per frame. 0 indicates the feature is disabled. + */ + public int maxGpus = 0; + + /** + * Determines the default minimum memory per frame. + */ + public long minMemory = Dispatcher.MEM_SERVICE_RESERVED_DEFAULT; + + /** + * Determines the default minimum gpu per frame. + */ + public long minGpuMemory = Dispatcher.MEM_SERVICE_GPU_RESERVED_DEFAULT; + + /** + * Determines the default tags. + */ + public LinkedHashSet tags = new LinkedHashSet(); + + public int timeout = 0; + + public int timeout_llu = 0; + + public long minMemoryIncrease = Dispatcher.MINIMUM_MEMORY_INCREASE; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java index 2e7cca2b9..8cb6e8638 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ServiceOverrideEntity.java @@ -17,9 +17,9 @@ public class ServiceOverrideEntity extends ServiceEntity { - /** - * The show that wants to override the service. - */ - public String showId; + /** + * The show that wants to override the service. 
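A quick numeric check of the ResourceUsage arithmetic above; the inputs are illustrative (core points are hundredths of a core, so 250 means 2.5 cores):

    // 3600s of wall-clock time on 2.5 cores and 1 GPU.
    ResourceUsage usage = new ResourceUsage(3600, 250, 1);
    usage.getClockTimeSeconds(); // 3600
    usage.getCoreTimeSeconds();  // (long) (3600 * 250 / 100f) = 9000
    usage.getGpuTimeSeconds();   // 3600 * 1 = 3600

Clock times below one second and core times that compute to zero are both clamped to 1, so the accounting never records zero usage for a frame that actually ran.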
+ */ + public String showId; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java index a7831deaf..9bcdc8c79 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java @@ -17,15 +17,15 @@ public class ShowEntity extends Entity implements ShowInterface { - public boolean active; - public boolean paused; - public int defaultMinCores; - public int defaultMaxCores; - public int defaultMinGpus; - public int defaultMaxGpus; - public String[] commentMail; + public boolean active; + public boolean paused; + public int defaultMinCores; + public int defaultMaxCores; + public int defaultMinGpus; + public int defaultMaxGpus; + public String[] commentMail; - public String getShowId() { - return id; - } + public String getShowId() { + return id; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java b/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java index a43aaf6af..ac21ec291 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ShowInterface.java @@ -17,5 +17,5 @@ public interface ShowInterface extends EntityInterface { - public String getShowId(); + public String getShowId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java b/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java index 9c8de3c55..811c4646e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java @@ -25,89 +25,89 @@ public class SortableShow implements Comparable { - private static final Logger logger = LogManager.getLogger(SortableShow.class); - - private String show; - private float tier; - - private Map failed = new ConcurrentHashMap(); - private Set failedAllocs = new HashSet(); - - public SortableShow(String show, float value) { - this.show = show; - this.tier = value; - } - - public String getShowId() { - return show; - } - - public float getValue() { - return tier; - } - - public boolean isSkipped(String tags, long cores, long memory) { - try { - if (failed.containsKey(tags)) { - long[] mark = failed.get(tags); - if (cores < mark[0]) { - logger.info("skipped due to not enough cores " + cores + " < " + mark[0]); - return true; - } else if (memory < mark[1]) { - logger.info("skipped due to not enough memory " + memory + " < " + mark[1]); - return true; + private static final Logger logger = LogManager.getLogger(SortableShow.class); + + private String show; + private float tier; + + private Map failed = new ConcurrentHashMap(); + private Set failedAllocs = new HashSet(); + + public SortableShow(String show, float value) { + this.show = show; + this.tier = value; + } + + public String getShowId() { + return show; + } + + public float getValue() { + return tier; + } + + public boolean isSkipped(String tags, long cores, long memory) { + try { + if (failed.containsKey(tags)) { + long[] mark = failed.get(tags); + if (cores < mark[0]) { + logger.info("skipped due to not enough cores " + cores + " < " + mark[0]); + return true; + } else if (memory < mark[1]) { + logger.info("skipped due to not enough memory " + memory + " < " + mark[1]); + return true; + } + } + return false; + } catch (Exception e) { + logger.info("exception checking skipped: " + e); + return false; } - } - return false; - } catch (Exception e) { - logger.info("exception checking skipped: " + e); - 
return false; } - } - public boolean isSkipped(AllocationInterface a) { - if (failedAllocs.contains(a)) { - return true; + public boolean isSkipped(AllocationInterface a) { + if (failedAllocs.contains(a)) { + return true; + } + return false; } - return false; - } - public void skip(String tags, long cores, long memory) { - if (tags != null) { - failed.put(tags, new long[] {cores, memory}); + public void skip(String tags, long cores, long memory) { + if (tags != null) { + failed.put(tags, new long[] {cores, memory}); + } } - } - - /** - * Adds an allocation that should not be booked on this show. - * - * @param Allocation - */ - public void skip(AllocationInterface a) { - synchronized (failedAllocs) { - failedAllocs.add(a); + + /** + * Adds an allocation that should not be booked on this show. + * + * @param Allocation + */ + public void skip(AllocationInterface a) { + synchronized (failedAllocs) { + failedAllocs.add(a); + } } - } - - @Override - public int compareTo(SortableShow o) { - return (int) ((this.tier * 100) - (o.getValue() * 100)); - } - - @Override - public int hashCode() { - return show.hashCode(); - }; - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; + + @Override + public int compareTo(SortableShow o) { + return (int) ((this.tier * 100) - (o.getValue() * 100)); } - if (this.getClass() != other.getClass()) { - return false; + + @Override + public int hashCode() { + return show.hashCode(); + }; + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (this.getClass() != other.getClass()) { + return false; + } + SortableShow that = (SortableShow) other; + return that.getShowId().equals(this.getShowId()); } - SortableShow that = (SortableShow) other; - return that.getShowId().equals(this.getShowId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/Source.java b/cuebot/src/main/java/com/imageworks/spcue/Source.java index e1188f219..c3408ce58 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Source.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Source.java @@ -20,32 +20,32 @@ */ public class Source { - public String source = "unknown"; - public String username = ""; - public String pid = ""; - public String host_kill = ""; - public String reason = ""; - - public Source() {} - - public Source(String source) { - this.source = source; - } - - public Source(String source, String username, String pid, String host_kill, String reason) { - this.source = source; - this.username = username; - this.pid = pid; - this.host_kill = host_kill; - this.reason = reason; - } - - public String getReason() { - return this.reason; - } - - public String toString() { - return "User: " + this.username + ", Pid: " + this.pid + ", Hostname: " + this.host_kill - + ", Reason: " + this.reason + "\n" + this.source; - } + public String source = "unknown"; + public String username = ""; + public String pid = ""; + public String host_kill = ""; + public String reason = ""; + + public Source() {} + + public Source(String source) { + this.source = source; + } + + public Source(String source, String username, String pid, String host_kill, String reason) { + this.source = source; + this.username = username; + this.pid = pid; + this.host_kill = host_kill; + this.reason = reason; + } + + public String getReason() { + return this.reason; + } + + public String toString() { + return "User: " + this.username + ", Pid: " + this.pid + ", Hostname: " + this.host_kill + + ", Reason: " + this.reason + "\n" + 
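SortableShow above orders shows for booking by tier (ascending, compareTo scaled by 100) and remembers resource thresholds below which hosts with a given tag should skip the show. An illustrative sketch; the show names, tag, and resource figures are placeholders, and the memory values just need to use whatever unit the caller uses consistently:

    SortableShow showA = new SortableShow("show_a", 1.2f);
    SortableShow showB = new SortableShow("show_b", 0.4f);
    showB.compareTo(showA);        // negative (about -80), so show_b sorts ahead of show_a

    // Hosts tagged "general" offering fewer than 800 core points or less than
    // 4194304 memory units should skip show_a from now on.
    showA.skip("general", 800, 4194304L);
    showA.isSkipped("general", 400, 8388608L);   // true: 400 < 800 core points
    showA.isSkipped("general", 1600, 8388608L);  // false: both thresholds are met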
this.source; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java b/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java index c3a6be1d6..ea13561b0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SpcueRuntimeException.java @@ -18,24 +18,24 @@ @SuppressWarnings("serial") public class SpcueRuntimeException extends RuntimeException { - public SpcueRuntimeException() { - super(); - // TODO Auto-generated constructor stub - } - - public SpcueRuntimeException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public SpcueRuntimeException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public SpcueRuntimeException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public SpcueRuntimeException() { + super(); + // TODO Auto-generated constructor stub + } + + public SpcueRuntimeException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public SpcueRuntimeException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public SpcueRuntimeException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java b/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java index 2fd234501..5f6c3a31d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SpecBuilderException.java @@ -20,14 +20,14 @@ @SuppressWarnings("serial") public class SpecBuilderException extends NestedRuntimeException { - public SpecBuilderException(String arg0) { - super(arg0); - // TODO Auto-generated constructor stub - } + public SpecBuilderException(String arg0) { + super(arg0); + // TODO Auto-generated constructor stub + } - public SpecBuilderException(String arg0, Throwable arg1) { - super(arg0, arg1); - // TODO Auto-generated constructor stub - } + public SpecBuilderException(String arg0, Throwable arg1) { + super(arg0, arg1); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java b/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java index 3d2e819a6..74fbccd30 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java +++ b/cuebot/src/main/java/com/imageworks/spcue/StrandedCores.java @@ -17,23 +17,23 @@ public final class StrandedCores { - /** - * The maximum time this object should be valid. - */ - private static final long MAX_AGE_MILLIS = 5000l; + /** + * The maximum time this object should be valid. 
+ */ + private static final long MAX_AGE_MILLIS = 5000l; - private final int cores; - private final long expireTime = System.currentTimeMillis() + MAX_AGE_MILLIS; + private final int cores; + private final long expireTime = System.currentTimeMillis() + MAX_AGE_MILLIS; - public StrandedCores(int cores) { - this.cores = cores; - } + public StrandedCores(int cores) { + this.cores = cores; + } - public int getCores() { - return this.cores; - } + public int getCores() { + return this.cores; + } - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; - } + public boolean isExpired() { + return System.currentTimeMillis() > expireTime; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java index f3b8d33ea..459c2f842 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionEntity.java @@ -17,27 +17,27 @@ public class SubscriptionEntity extends Entity implements SubscriptionInterface { - public String showId; - public String allocationId; - public String facilityId; + public String showId; + public String allocationId; + public String facilityId; - public int size; - public int burst; + public int size; + public int burst; - public String getShowId() { - return showId; - } + public String getShowId() { + return showId; + } - public String getAllocationId() { - return allocationId; - } + public String getAllocationId() { + return allocationId; + } - public String getSubscriptionId() { - return id; - } + public String getSubscriptionId() { + return id; + } - @Override - public String getFacilityId() { - return facilityId; - } + @Override + public String getFacilityId() { + return facilityId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java index 8b0bf2604..8dff57320 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SubscriptionInterface.java @@ -16,5 +16,5 @@ package com.imageworks.spcue; public interface SubscriptionInterface extends ShowInterface, AllocationInterface { - public String getSubscriptionId(); + public String getSubscriptionId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java b/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java index 0ae853737..4252abc0d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/TaskEntity.java @@ -16,44 +16,44 @@ public class TaskEntity extends Entity implements TaskInterface { - public int minCoreUnits = 100; - public boolean isDefaultTask = false; - - public String shot; - public String showId; - public String deptId; - public String pointId; - - public TaskEntity() {} - - public TaskEntity(PointInterface c, String shot, int minCoreUnits) { - this.pointId = c.getPointId(); - this.shot = shot; - this.minCoreUnits = minCoreUnits; - } - - public TaskEntity(PointInterface c, String shot) { - this.pointId = c.getPointId(); - this.shot = shot; - } - - @Override - public String getDepartmentId() { - return deptId; - } - - @Override - public String getShowId() { - return showId; - } - - @Override - public String getTaskId() { - return id; - } - - @Override - public String getPointId() { - return pointId; - } + public int minCoreUnits = 100; + public boolean isDefaultTask = false; 
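StrandedCores above is a short-lived note that a host has idle core points the next dispatch should absorb; it is only honoured while it is younger than MAX_AGE_MILLIS (5 seconds). A tiny illustrative sketch:

    StrandedCores stranded = new StrandedCores(200); // 200 core points reported stranded
    stranded.getCores();    // 200
    stranded.isExpired();   // false immediately after creation;
                            // true once 5000 ms have passed since construction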
+ + public String shot; + public String showId; + public String deptId; + public String pointId; + + public TaskEntity() {} + + public TaskEntity(PointInterface c, String shot, int minCoreUnits) { + this.pointId = c.getPointId(); + this.shot = shot; + this.minCoreUnits = minCoreUnits; + } + + public TaskEntity(PointInterface c, String shot) { + this.pointId = c.getPointId(); + this.shot = shot; + } + + @Override + public String getDepartmentId() { + return deptId; + } + + @Override + public String getShowId() { + return showId; + } + + @Override + public String getTaskId() { + return id; + } + + @Override + public String getPointId() { + return pointId; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java b/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java index fc13f452f..b01ae7271 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/TaskInterface.java @@ -16,10 +16,10 @@ package com.imageworks.spcue; public interface TaskInterface extends EntityInterface, PointInterface { - /** - * Returns the unique Id of the task - * - * @return - */ - public String getTaskId(); + /** + * Returns the unique Id of the task + * + * @return + */ + public String getTaskId(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java b/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java index fd870aa75..0b952c589 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ThreadStats.java @@ -17,23 +17,23 @@ public class ThreadStats { - private int threads; - private int avgFrameTime; + private int threads; + private int avgFrameTime; - public int getThreads() { - return threads; - } + public int getThreads() { + return threads; + } - public void setThreads(int threads) { - this.threads = threads; - } + public void setThreads(int threads) { + this.threads = threads; + } - public int getAvgFrameTime() { - return avgFrameTime; - } + public int getAvgFrameTime() { + return avgFrameTime; + } - public void setAvgFrameTime(int avgFrameTime) { - this.avgFrameTime = avgFrameTime; - } + public void setAvgFrameTime(int avgFrameTime) { + this.avgFrameTime = avgFrameTime; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java b/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java index cc664a322..d896b2303 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/TrackitTaskDetail.java @@ -22,14 +22,14 @@ */ public class TrackitTaskDetail { - public String show; - public String shot; - public String task; - public String status; - public Date startDate; - public Date endDate; - public String cgSup; - public int frameCount; - public int points; - public int weeks; + public String show; + public String shot; + public String task; + public String status; + public Date startDate; + public Date endDate; + public String cgSup; + public int frameCount; + public int points; + public int weeks; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java b/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java index c88b1b933..8c92ad016 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java @@ -23,252 +23,254 @@ public class VirtualProc extends FrameEntity implements ProcInterface { - private static final Logger logger = 
LogManager.getLogger(VirtualProc.class); - - public String hostId; - public String allocationId; - public String frameId; - public String hostName; - public String os; - public byte[] childProcesses; - - public boolean canHandleNegativeCoresRequest; - public int coresReserved; - public long memoryReserved; - public long memoryUsed; - public long memoryMax; - public long virtualMemoryUsed; - public long virtualMemoryMax; - - public int gpusReserved; - public long gpuMemoryReserved; - public long gpuMemoryUsed; - public long gpuMemoryMax; - - public boolean unbooked; - public boolean usageRecorded = false; - public boolean isLocalDispatch = false; - - public String getProcId() { - return id; - } - - public String getHostId() { - return hostId; - } - - public String getAllocationId() { - return allocationId; - } - - public String getFrameId() { - return frameId; - } - - public String getName() { - return hostName; - } - - /** - * Build and return a proc in either fast or efficient mode. - * - * Efficient mode tries to assign one core per frame, but may upgrade the number of cores based on - * memory usage. - * - * Fast mode books all the idle cores on the the host at one time. - * - * @param host - * @param frame - * @return - */ - public static final VirtualProc build(DispatchHost host, DispatchFrame frame, - String... selfishServices) { - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.getAllocationId(); - proc.hostId = host.getHostId(); - proc.frameId = null; - proc.layerId = frame.getLayerId(); - proc.jobId = frame.getJobId(); - proc.showId = frame.getShowId(); - proc.facilityId = frame.getFacilityId(); - proc.os = frame.os; - - proc.hostName = host.getName(); - proc.unbooked = false; - proc.isLocalDispatch = host.isLocalDispatch; - - proc.coresReserved = frame.minCores; - proc.memoryReserved = frame.getMinMemory(); - proc.gpusReserved = frame.minGpus; - proc.gpuMemoryReserved = frame.minGpuMemory; - - /* - * Frames that are announcing cores less than 100 are not multi-threaded so there is no reason - * for the frame to span more than a single core. + private static final Logger logger = LogManager.getLogger(VirtualProc.class); + + public String hostId; + public String allocationId; + public String frameId; + public String hostName; + public String os; + public byte[] childProcesses; + + public boolean canHandleNegativeCoresRequest; + public int coresReserved; + public long memoryReserved; + public long memoryUsed; + public long memoryMax; + public long virtualMemoryUsed; + public long virtualMemoryMax; + + public int gpusReserved; + public long gpuMemoryReserved; + public long gpuMemoryUsed; + public long gpuMemoryMax; + + public boolean unbooked; + public boolean usageRecorded = false; + public boolean isLocalDispatch = false; + + public String getProcId() { + return id; + } + + public String getHostId() { + return hostId; + } + + public String getAllocationId() { + return allocationId; + } + + public String getFrameId() { + return frameId; + } + + public String getName() { + return hostName; + } + + /** + * Build and return a proc in either fast or efficient mode. * - * If we are in "fast mode", we just book all the cores If the host is nimby, desktops are - * automatically fast mode. + * Efficient mode tries to assign one core per frame, but may upgrade the number of cores based + * on memory usage. + * + * Fast mode books all the idle cores on the the host at one time. 
+ * + * @param host + * @param frame + * @return */ + public static final VirtualProc build(DispatchHost host, DispatchFrame frame, + String... selfishServices) { + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.getAllocationId(); + proc.hostId = host.getHostId(); + proc.frameId = null; + proc.layerId = frame.getLayerId(); + proc.jobId = frame.getJobId(); + proc.showId = frame.getShowId(); + proc.facilityId = frame.getFacilityId(); + proc.os = frame.os; + + proc.hostName = host.getName(); + proc.unbooked = false; + proc.isLocalDispatch = host.isLocalDispatch; + + proc.coresReserved = frame.minCores; + proc.memoryReserved = frame.getMinMemory(); + proc.gpusReserved = frame.minGpus; + proc.gpuMemoryReserved = frame.minGpuMemory; + + /* + * Frames that are announcing cores less than 100 are not multi-threaded so there is no + * reason for the frame to span more than a single core. + * + * If we are in "fast mode", we just book all the cores If the host is nimby, desktops are + * automatically fast mode. + */ + + if (host.strandedCores > 0) { + proc.coresReserved = proc.coresReserved + host.strandedCores; + } - if (host.strandedCores > 0) { - proc.coresReserved = proc.coresReserved + host.strandedCores; - } + proc.canHandleNegativeCoresRequest = host.canHandleNegativeCoresRequest(proc.coresReserved); + + if (proc.coresReserved == 0) { + logger.debug("Reserving all cores"); + proc.coresReserved = host.cores; + } else if (proc.coresReserved < 0) { + logger.debug("Reserving all cores minus " + proc.coresReserved); + proc.coresReserved = host.cores + proc.coresReserved; + } else if (proc.coresReserved >= 100) { + + int originalCores = proc.coresReserved; + + /* + * wholeCores could be 0 if we have a fraction of a core, we can just throw a re + */ + int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); + if (wholeCores == 0) { + throw new EntityException("The host had only a fraction of a core remaining " + + "but the frame required " + frame.minCores); + } - proc.canHandleNegativeCoresRequest = host.canHandleNegativeCoresRequest(proc.coresReserved); - - if (proc.coresReserved == 0) { - logger.debug("Reserving all cores"); - proc.coresReserved = host.cores; - } else if (proc.coresReserved < 0) { - logger.debug("Reserving all cores minus " + proc.coresReserved); - proc.coresReserved = host.cores + proc.coresReserved; - } else if (proc.coresReserved >= 100) { - - int originalCores = proc.coresReserved; - - /* - * wholeCores could be 0 if we have a fraction of a core, we can just throw a re - */ - int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); - if (wholeCores == 0) { - throw new EntityException("The host had only a fraction of a core remaining " - + "but the frame required " + frame.minCores); - } - - // if (host.threadMode == ThreadMode.Variable.value() && - // CueUtil.isDayTime()) { - if (host.threadMode == ThreadMode.ALL_VALUE) { - proc.coresReserved = wholeCores * 100; - } else { - if (frame.threadable) { - if (selfishServices != null && frame.services != null - && containsSelfishService(frame.services.split(","), selfishServices)) { - proc.coresReserved = wholeCores * 100; - } else { - if (host.idleMemory - frame.getMinMemory() <= Dispatcher.MEM_STRANDED_THRESHHOLD) { - proc.coresReserved = wholeCores * 100; + // if (host.threadMode == ThreadMode.Variable.value() && + // CueUtil.isDayTime()) { + if (host.threadMode == ThreadMode.ALL_VALUE) { + proc.coresReserved = wholeCores * 100; } else { - proc.coresReserved = getCoreSpan(host, frame.getMinMemory()); + 
if (frame.threadable) { + if (selfishServices != null && frame.services != null + && containsSelfishService(frame.services.split(","), selfishServices)) { + proc.coresReserved = wholeCores * 100; + } else { + if (host.idleMemory + - frame.getMinMemory() <= Dispatcher.MEM_STRANDED_THRESHHOLD) { + proc.coresReserved = wholeCores * 100; + } else { + proc.coresReserved = getCoreSpan(host, frame.getMinMemory()); + } + } + if (host.threadMode == ThreadMode.VARIABLE_VALUE && proc.coresReserved <= 200) { + proc.coresReserved = 200; + if (proc.coresReserved > host.idleCores) { + // Do not allow threadable frame running on 1 core. + throw new JobDispatchException( + "Do not allow threadable frame running one core on a ThreadMode.Variable host."); + } + } + } + } + + /* + * Sanity checks to ensure coreUnits are not to high or to low. + */ + if (proc.coresReserved < 100) { + proc.coresReserved = 100; + } + + /* + * If the core value is changed it can never fall below the original. + */ + if (proc.coresReserved < originalCores) { + proc.coresReserved = originalCores; + } + + /* + * Check to ensure we haven't exceeded max cores. + */ + if (frame.maxCores > 0 && proc.coresReserved >= frame.maxCores) { + proc.coresReserved = frame.maxCores; } - } - if (host.threadMode == ThreadMode.VARIABLE_VALUE && proc.coresReserved <= 200) { - proc.coresReserved = 200; + if (proc.coresReserved > host.idleCores) { - // Do not allow threadable frame running on 1 core. - throw new JobDispatchException( - "Do not allow threadable frame running one core on a ThreadMode.Variable host."); + if (host.threadMode == ThreadMode.VARIABLE_VALUE && frame.threadable + && wholeCores == 1) { + throw new JobDispatchException( + "Do not allow threadable frame running one core on a ThreadMode.Variable host."); + } + proc.coresReserved = wholeCores * 100; } - } } - } - - /* - * Sanity checks to ensure coreUnits are not to high or to low. - */ - if (proc.coresReserved < 100) { - proc.coresReserved = 100; - } - - /* - * If the core value is changed it can never fall below the original. - */ - if (proc.coresReserved < originalCores) { - proc.coresReserved = originalCores; - } - - /* - * Check to ensure we haven't exceeded max cores. - */ - if (frame.maxCores > 0 && proc.coresReserved >= frame.maxCores) { - proc.coresReserved = frame.maxCores; - } - - if (proc.coresReserved > host.idleCores) { - if (host.threadMode == ThreadMode.VARIABLE_VALUE && frame.threadable && wholeCores == 1) { - throw new JobDispatchException( - "Do not allow threadable frame running one core on a ThreadMode.Variable host."); + + /* + * Don't thread non-threadable layers, no matter what people put for the number of cores. + */ + if (!frame.threadable && proc.coresReserved > 100) { + proc.coresReserved = 100; } - proc.coresReserved = wholeCores * 100; - } + + return proc; } - /* - * Don't thread non-threadable layers, no matter what people put for the number of cores. 
- */ - if (!frame.threadable && proc.coresReserved > 100) { - proc.coresReserved = 100; + private static final boolean containsSelfishService(String[] frameServices, + String[] selfishServices) { + for (String frameService : frameServices) { + for (String selfishService : selfishServices) { + if (frameService.equals(selfishService)) { + return true; + } + } + } + return false; } - return proc; - } + public static final VirtualProc build(DispatchHost host, DispatchFrame frame, + LocalHostAssignment lja) { + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.getAllocationId(); + proc.hostId = host.getHostId(); + proc.frameId = null; + proc.layerId = frame.getLayerId(); + proc.jobId = frame.getJobId(); + proc.showId = frame.getShowId(); + proc.facilityId = frame.getFacilityId(); + proc.os = frame.os; + + proc.hostName = host.getName(); + proc.unbooked = false; + proc.isLocalDispatch = host.isLocalDispatch; + + proc.coresReserved = lja.getThreads() * 100; + proc.memoryReserved = frame.getMinMemory(); + proc.gpusReserved = frame.minGpus; + proc.gpuMemoryReserved = frame.minGpuMemory; + + int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); + if (wholeCores == 0) { + throw new EntityException("The host had only a fraction of a core remaining " + + "but the frame required " + frame.minCores); + } - private static final boolean containsSelfishService(String[] frameServices, - String[] selfishServices) { - for (String frameService : frameServices) { - for (String selfishService : selfishServices) { - if (frameService.equals(selfishService)) { - return true; + if (proc.coresReserved > host.idleCores) { + proc.coresReserved = wholeCores * 100; } - } - } - return false; - } - - public static final VirtualProc build(DispatchHost host, DispatchFrame frame, - LocalHostAssignment lja) { - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.getAllocationId(); - proc.hostId = host.getHostId(); - proc.frameId = null; - proc.layerId = frame.getLayerId(); - proc.jobId = frame.getJobId(); - proc.showId = frame.getShowId(); - proc.facilityId = frame.getFacilityId(); - proc.os = frame.os; - - proc.hostName = host.getName(); - proc.unbooked = false; - proc.isLocalDispatch = host.isLocalDispatch; - - proc.coresReserved = lja.getThreads() * 100; - proc.memoryReserved = frame.getMinMemory(); - proc.gpusReserved = frame.minGpus; - proc.gpuMemoryReserved = frame.minGpuMemory; - - int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); - if (wholeCores == 0) { - throw new EntityException("The host had only a fraction of a core remaining " - + "but the frame required " + frame.minCores); - } - if (proc.coresReserved > host.idleCores) { - proc.coresReserved = wholeCores * 100; - } + return proc; - return proc; - - } - - /** - * Allocates additional cores when the frame is using more 50% more than a single cores worth of - * memory. - * - * @param host - * @param minMemory - * @return - */ - public static int getCoreSpan(DispatchHost host, long minMemory) { - int totalCores = (int) (Math.floor(host.cores / 100.0)); - int idleCores = (int) (Math.floor(host.idleCores / 100.0)); - if (idleCores < 1) { - return 100; } - long memPerCore = host.idleMemory / totalCores; - double procs = minMemory / (double) memPerCore; - int reserveCores = (int) (Math.round(procs)) * 100; + /** + * Allocates additional cores when the frame is using more 50% more than a single cores worth of + * memory. 
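The reservation logic in VirtualProc.build above reduces to a handful of rules keyed off the requested core points (100 == one core). A condensed sketch of how a dispatcher-side caller sees it; host and frame stand in for a DispatchHost and DispatchFrame obtained elsewhere:

    // selfishServices is an optional vararg; omitting it is the same as passing none.
    VirtualProc proc = VirtualProc.build(host, frame);

    // Outcomes of the rules above, with any stranded cores added on top first:
    //   frame.minCores == 0      -> reserve every core on the host
    //   frame.minCores  < 0      -> reserve host.cores + minCores ("all cores minus N")
    //   0 < frame.minCores < 100 -> fractional, single-core frame: keep the requested amount
    //   frame.minCores >= 100    -> expanded to whole cores based on ThreadMode, threadability,
    //                               idle memory and frame.maxCores, then capped by idle cores
    //   in every case, a non-threadable layer is cut back to a single core (100 points)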
+ * + * @param host + * @param minMemory + * @return + */ + public static int getCoreSpan(DispatchHost host, long minMemory) { + int totalCores = (int) (Math.floor(host.cores / 100.0)); + int idleCores = (int) (Math.floor(host.idleCores / 100.0)); + if (idleCores < 1) { + return 100; + } - return reserveCores; - } + long memPerCore = host.idleMemory / totalCores; + double procs = minMemory / (double) memPerCore; + int reserveCores = (int) (Math.round(procs)) * 100; + + return reserveCores; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java b/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java index d6c4b45fe..7ab5cda91 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java @@ -34,58 +34,58 @@ @Configuration @ImportResource({"classpath:conf/spring/applicationContext-dbEngine.xml", - "classpath:conf/spring/applicationContext-grpc.xml", - "classpath:conf/spring/applicationContext-grpcServer.xml", - "classpath:conf/spring/applicationContext-service.xml", - "classpath:conf/spring/applicationContext-jms.xml", - "classpath:conf/spring/applicationContext-criteria.xml"}) + "classpath:conf/spring/applicationContext-grpc.xml", + "classpath:conf/spring/applicationContext-grpcServer.xml", + "classpath:conf/spring/applicationContext-service.xml", + "classpath:conf/spring/applicationContext-jms.xml", + "classpath:conf/spring/applicationContext-criteria.xml"}) @EnableConfigurationProperties @PropertySource({"classpath:opencue.properties"}) public class AppConfig { - @Configuration - @Conditional(PostgresDatabaseCondition.class) - @ImportResource({"classpath:conf/spring/applicationContext-dao-postgres.xml"}) - static class PostgresEngineConfig { - } + @Configuration + @Conditional(PostgresDatabaseCondition.class) + @ImportResource({"classpath:conf/spring/applicationContext-dao-postgres.xml"}) + static class PostgresEngineConfig { + } - @Bean - @Primary - @ConfigurationProperties(prefix = "datasource.cue-data-source") - public DataSource cueDataSource() { - return DataSourceBuilder.create().build(); - } + @Bean + @Primary + @ConfigurationProperties(prefix = "datasource.cue-data-source") + public DataSource cueDataSource() { + return DataSourceBuilder.create().build(); + } - @Bean - public ServletRegistrationBean jobLaunchServlet() { - ServletRegistrationBean b = new ServletRegistrationBean<>(); - b.addUrlMappings("/launch"); - b.addInitParameter("contextConfigLocation", - "classpath:conf/spring/jobLaunchServlet-servlet.xml"); - b.setServlet(new JobLaunchServlet()); - return b; - } + @Bean + public ServletRegistrationBean jobLaunchServlet() { + ServletRegistrationBean b = new ServletRegistrationBean<>(); + b.addUrlMappings("/launch"); + b.addInitParameter("contextConfigLocation", + "classpath:conf/spring/jobLaunchServlet-servlet.xml"); + b.setServlet(new JobLaunchServlet()); + return b; + } - @Bean - public ServletRegistrationBean healthCheckServlet() { - ServletRegistrationBean b = new ServletRegistrationBean<>(); - b.addUrlMappings("/health"); - b.addInitParameter("contextConfigLocation", - "classpath:conf/spring/healthCheckServlet-servlet.xml"); - b.setServlet(new HealthCheckServlet()); - return b; - } + @Bean + public ServletRegistrationBean healthCheckServlet() { + ServletRegistrationBean b = new ServletRegistrationBean<>(); + b.addUrlMappings("/health"); + b.addInitParameter("contextConfigLocation", + "classpath:conf/spring/healthCheckServlet-servlet.xml"); + 
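getCoreSpan above upgrades the core reservation when the frame's memory request is well above a single core's share of the host's idle memory. A worked example with illustrative numbers (any consistent memory unit works, since only the ratio matters):

    // 8-core host (host.cores = 800), 4 cores idle (host.idleCores = 400),
    // host.idleMemory = 32 units, frame asks for minMemory = 10 units.
    //   totalCores   = floor(800 / 100)  = 8
    //   idleCores    = floor(400 / 100)  = 4      (>= 1, so no early return of 100)
    //   memPerCore   = 32 / 8            = 4
    //   procs        = 10 / 4.0          = 2.5
    //   reserveCores = round(2.5) * 100  = 300    -> three cores reserved
    int reserved = VirtualProc.getCoreSpan(host, 10L); // 300 with the host values above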
b.setServlet(new HealthCheckServlet()); + return b; + } - /** - * Registers the Prometheus MetricsServlet to expose metrics at /metrics endpoint - * - * @return A ServletRegistrationBean for MetricsServlet - */ - @Bean - public ServletRegistrationBean prometheusServer() { - ServletRegistrationBean b = new ServletRegistrationBean<>(); - b.addUrlMappings("/metrics"); - b.setServlet(new MetricsServlet()); - return b; - } + /** + * Registers the Prometheus MetricsServlet to expose metrics at /metrics endpoint + * + * @return A ServletRegistrationBean for MetricsServlet + */ + @Bean + public ServletRegistrationBean prometheusServer() { + ServletRegistrationBean b = new ServletRegistrationBean<>(); + b.addUrlMappings("/metrics"); + b.setServlet(new MetricsServlet()); + return b; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java b/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java index 98fd478ed..0ae36a296 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java @@ -1,9 +1,9 @@ package com.imageworks.spcue.config; public enum DatabaseEngine { - POSTGRES; + POSTGRES; - public static DatabaseEngine fromEnv() { - return POSTGRES; - } + public static DatabaseEngine fromEnv() { + return POSTGRES; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java b/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java index aae40a11e..f80ebfb03 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/PostgresDatabaseCondition.java @@ -6,14 +6,14 @@ public class PostgresDatabaseCondition implements Condition { - @Override - public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) { - String dbEngine = System.getenv("CUEBOT_DB_ENGINE"); - if (dbEngine == null) { - return true; + @Override + public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) { + String dbEngine = System.getenv("CUEBOT_DB_ENGINE"); + if (dbEngine == null) { + return true; + } + DatabaseEngine selectedDatabaseEngine = DatabaseEngine.valueOf(dbEngine.toUpperCase()); + return selectedDatabaseEngine.equals(DatabaseEngine.POSTGRES); } - DatabaseEngine selectedDatabaseEngine = DatabaseEngine.valueOf(dbEngine.toUpperCase()); - return selectedDatabaseEngine.equals(DatabaseEngine.POSTGRES); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java index 507fa19a8..dc44a2672 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ActionDao.java @@ -23,15 +23,15 @@ public interface ActionDao { - void createAction(ActionEntity action); + void createAction(ActionEntity action); - void deleteAction(ActionInterface action); + void deleteAction(ActionInterface action); - ActionEntity getAction(String id); + ActionEntity getAction(String id); - ActionEntity getAction(ActionInterface action); + ActionEntity getAction(ActionInterface action); - void updateAction(ActionEntity action); + void updateAction(ActionEntity action); - List getActions(FilterInterface filter); + List getActions(FilterInterface filter); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java 
index 2b5a6846a..1825cf6d8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/AllocationDao.java @@ -26,81 +26,81 @@ */ public interface AllocationDao { - /** - * returns an AllocationEntity from its unique ID - * - * @param id - * @return AllocationEntity - */ - AllocationEntity getAllocationEntity(String id); + /** + * returns an AllocationEntity from its unique ID + * + * @param id + * @return AllocationEntity + */ + AllocationEntity getAllocationEntity(String id); - /** - * Return an AllocationEntity for the given facility and unique allocation name. - * - * @param name - * @return AllocationEntity - */ - AllocationEntity findAllocationEntity(String facility, String name); + /** + * Return an AllocationEntity for the given facility and unique allocation name. + * + * @param name + * @return AllocationEntity + */ + AllocationEntity findAllocationEntity(String facility, String name); - /** - * Return an AllocationEntity from its fully qualified name which should be formatted as - * facility.name. - * - * @param name - * @return - */ - AllocationEntity findAllocationEntity(String name); + /** + * Return an AllocationEntity from its fully qualified name which should be formatted as + * facility.name. + * + * @param name + * @return + */ + AllocationEntity findAllocationEntity(String name); - /** - * Creates a new allocation - * - * @param detail - */ - void insertAllocation(FacilityInterface facility, AllocationEntity detail); + /** + * Creates a new allocation + * + * @param detail + */ + void insertAllocation(FacilityInterface facility, AllocationEntity detail); - /** - * Deletes an allocation - * - * @param alloc - */ - void deleteAllocation(AllocationInterface alloc); + /** + * Deletes an allocation + * + * @param alloc + */ + void deleteAllocation(AllocationInterface alloc); - /** - * Updates the name of the allocation. This method also updates all child host allocation tags so - * you'll need to run allocDao.recalculateTags(alloc) - * - * @param alloc - * @param name - */ - void updateAllocationName(AllocationInterface alloc, String name); + /** + * Updates the name of the allocation. This method also updates all child host allocation tags + * so you'll need to run allocDao.recalculateTags(alloc) + * + * @param alloc + * @param name + */ + void updateAllocationName(AllocationInterface alloc, String name); - /** - * Updates the allocation tag. All hosts in the allocation are retagged. - * - * @param a - * @param tag - */ - void updateAllocationTag(AllocationInterface a, String tag); + /** + * Updates the allocation tag. All hosts in the allocation are retagged. + * + * @param a + * @param tag + */ + void updateAllocationTag(AllocationInterface a, String tag); - /** - * Sets the default allocation, AKA where procs go first. - * - * @param a - */ - void setDefaultAllocation(AllocationInterface a); + /** + * Sets the default allocation, AKA where procs go first. + * + * @param a + */ + void setDefaultAllocation(AllocationInterface a); - /** - * Returns the current default allocation. - * - * @return - */ - AllocationEntity getDefaultAllocationEntity(); + /** + * Returns the current default allocation. + * + * @return + */ + AllocationEntity getDefaultAllocationEntity(); - /** - * Set the allocation as billable or not billble. - * - * @param alloc - * @param value - */ - void updateAllocationBillable(AllocationInterface alloc, boolean value); + /** + * Set the allocation as billable or not billble. 
+ * + * @param alloc + * @param value + */ + void updateAllocationBillable(AllocationInterface alloc, boolean value); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java index 4e2eedf5d..4323f90f2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java @@ -25,183 +25,185 @@ public interface BookingDao { - /** - * Updates the maximum number of cores the given local host assignment should use. - * - * @param l - * @return - */ - boolean updateMaxCores(LocalHostAssignment l, int maxCoreUnits); - - /** - * Updates the maximum number of gpus the given local host assignment should use. - * - * @param l - * @return - */ - boolean updateMaxGpus(LocalHostAssignment l, int gpus); - - /** - * Updates the maximum amount of memory a given local host assignment should use. - * - * @param l - * @return - */ - boolean updateMaxMemory(LocalHostAssignment l, long maxMemory); - - /** - * Updates the maximum amount of gpu memory a given local host assignment should use. - * - * @param l - * @return - */ - boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory); - - /** - * Create a new LocalHostAssignment attached to the given job. - * - * @param host - * @param job - * @param lha - */ - void insertLocalHostAssignment(HostInterface host, JobInterface job, LocalHostAssignment lha); - - /** - * Create a new LocalHostAssignment attached to the given layer. - * - * @param host - * @param layer - * @param lha - */ - void insertLocalHostAssignment(HostInterface host, LayerInterface layer, LocalHostAssignment lha); - - /** - * Create a new LocalHostAssignment attached to the given frame. - * - * @param host - * @param frame - * @param lha - */ - void insertLocalHostAssignment(HostInterface host, FrameInterface frame, LocalHostAssignment lha); - - /** - * Return the host + jobs local booking assignment properties. - * - * @param host - * @param job - * @return - */ - List getLocalJobAssignment(HostInterface host); - - /** - * Return the host + jobs local booking assignment properties. - * - * @param host - * @param job - * @return - */ - LocalHostAssignment getLocalJobAssignment(String id); - - /** - * Return the host + jobs local booking assignment properties. - * - * @param hostId - * @param jobId - * @return - */ - LocalHostAssignment getLocalJobAssignment(String hostId, String jobId); - - /** - * Return true if the host has a local job assignment. - * - * @param host - * @return - */ - boolean hasLocalJob(HostInterface host); - - /** - * Returns true if the host has an active local booking. - * - * @param host - * @return - */ - boolean hasActiveLocalJob(HostInterface host); - - /** - * Delete the given LocalHostAssignment. - * - * @param e - */ - boolean deleteLocalJobAssignment(LocalHostAssignment lha); - - /** - * Deactivate the given LocalHostAssignment. - * - * @param l - */ - boolean deactivate(LocalHostAssignment l); - - /** - * Return the difference between the number of assigned cores and the given coreUnits. - * - * @param l - * @param coreUnits - * @return - */ - int getCoreUsageDifference(LocalHostAssignment l, int coreUnits); - - /** - * Return the difference between the number of assigned gpus and the given gpuUnits. - * - * @param l - * @param gpuUnits - * @return - */ - int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits); - - /** - * Allocate additional cores from the given host. 
- * - * @param h - * @param cores - * @return - */ - boolean allocateCoresFromHost(HostInterface h, int cores); - - /** - * Deallocate cores from the given host, returning them to its pool. - * - * @param h - * @param cores - * @return - */ - boolean deallocateCoresFromHost(HostInterface h, int cores); - - /** - * Allocate additional gpus from the given host. - * - * @param h - * @param gpus - * @return - */ - boolean allocateGpusFromHost(HostInterface h, int gpus); - - /** - * Deallocate gpu from the given host, returning them to its pool. - * - * @param h - * @param gpus - * @return - */ - boolean deallocateGpusFromHost(HostInterface h, int gpus); - - /** - * Return true if the Host has a resource deficit. A deficit can occur if there are more resources - * in use than the maximum allowed due to changes from the user. - * - * @param l - * @return - */ - boolean hasResourceDeficit(HostInterface host); + /** + * Updates the maximum number of cores the given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxCores(LocalHostAssignment l, int maxCoreUnits); + + /** + * Updates the maximum number of gpus the given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxGpus(LocalHostAssignment l, int gpus); + + /** + * Updates the maximum amount of memory a given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxMemory(LocalHostAssignment l, long maxMemory); + + /** + * Updates the maximum amount of gpu memory a given local host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory); + + /** + * Create a new LocalHostAssignment attached to the given job. + * + * @param host + * @param job + * @param lha + */ + void insertLocalHostAssignment(HostInterface host, JobInterface job, LocalHostAssignment lha); + + /** + * Create a new LocalHostAssignment attached to the given layer. + * + * @param host + * @param layer + * @param lha + */ + void insertLocalHostAssignment(HostInterface host, LayerInterface layer, + LocalHostAssignment lha); + + /** + * Create a new LocalHostAssignment attached to the given frame. + * + * @param host + * @param frame + * @param lha + */ + void insertLocalHostAssignment(HostInterface host, FrameInterface frame, + LocalHostAssignment lha); + + /** + * Return the host + jobs local booking assignment properties. + * + * @param host + * @param job + * @return + */ + List getLocalJobAssignment(HostInterface host); + + /** + * Return the host + jobs local booking assignment properties. + * + * @param host + * @param job + * @return + */ + LocalHostAssignment getLocalJobAssignment(String id); + + /** + * Return the host + jobs local booking assignment properties. + * + * @param hostId + * @param jobId + * @return + */ + LocalHostAssignment getLocalJobAssignment(String hostId, String jobId); + + /** + * Return true if the host has a local job assignment. + * + * @param host + * @return + */ + boolean hasLocalJob(HostInterface host); + + /** + * Returns true if the host has an active local booking. + * + * @param host + * @return + */ + boolean hasActiveLocalJob(HostInterface host); + + /** + * Delete the given LocalHostAssignment. + * + * @param e + */ + boolean deleteLocalJobAssignment(LocalHostAssignment lha); + + /** + * Deactivate the given LocalHostAssignment. 
+ * + * @param l + */ + boolean deactivate(LocalHostAssignment l); + + /** + * Return the difference between the number of assigned cores and the given coreUnits. + * + * @param l + * @param coreUnits + * @return + */ + int getCoreUsageDifference(LocalHostAssignment l, int coreUnits); + + /** + * Return the difference between the number of assigned gpus and the given gpuUnits. + * + * @param l + * @param gpuUnits + * @return + */ + int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits); + + /** + * Allocate additional cores from the given host. + * + * @param h + * @param cores + * @return + */ + boolean allocateCoresFromHost(HostInterface h, int cores); + + /** + * Deallocate cores from the given host, returning them to its pool. + * + * @param h + * @param cores + * @return + */ + boolean deallocateCoresFromHost(HostInterface h, int cores); + + /** + * Allocate additional gpus from the given host. + * + * @param h + * @param gpus + * @return + */ + boolean allocateGpusFromHost(HostInterface h, int gpus); + + /** + * Deallocate gpu from the given host, returning them to its pool. + * + * @param h + * @param gpus + * @return + */ + boolean deallocateGpusFromHost(HostInterface h, int gpus); + + /** + * Return true if the Host has a resource deficit. A deficit can occur if there are more + * resources in use than the maximum allowed due to changes from the user. + * + * @param l + * @return + */ + boolean hasResourceDeficit(HostInterface host); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java index e0b0acc51..3b8c0277f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java @@ -23,79 +23,80 @@ public interface CommentDao { - /** - * deletes the specified comment. - * - * @param id - */ - public void deleteComment(String id); + /** + * deletes the specified comment. + * + * @param id + */ + public void deleteComment(String id); - /** - * Deletes comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return boolean: returns true if one or more comments where deleted - */ - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); + /** + * Deletes comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return boolean: returns true if one or more comments where deleted + */ + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, + String subject); - /** - * Get comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return List - */ - public List getCommentsByHostUserAndSubject(HostInterface host, String user, - String subject); + /** + * Get comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return List + */ + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject); - /** - * Retrieves the specified comment. - * - * @param id - * @return - */ - public CommentDetail getCommentDetail(String id); + /** + * Retrieves the specified comment. 
+ * + * @param id + * @return + */ + public CommentDetail getCommentDetail(String id); - /** - * Inserts a comment on a job - * - * @param job - * @param comment - */ - public void insertComment(JobInterface job, CommentDetail comment); + /** + * Inserts a comment on a job + * + * @param job + * @param comment + */ + public void insertComment(JobInterface job, CommentDetail comment); - /** - * Inserts a comment on a host - * - * @param host - * @param comment - */ - public void insertComment(HostInterface host, CommentDetail comment); + /** + * Inserts a comment on a host + * + * @param host + * @param comment + */ + public void insertComment(HostInterface host, CommentDetail comment); - /** - * Update specified comment - * - * @param comment - */ - public void updateComment(CommentDetail comment); + /** + * Update specified comment + * + * @param comment + */ + public void updateComment(CommentDetail comment); - /** - * Updates the specified comment's message field with the supplied value. - * - * @param id - * @param message - */ - public void updateCommentMessage(String id, String message); + /** + * Updates the specified comment's message field with the supplied value. + * + * @param id + * @param message + */ + public void updateCommentMessage(String id, String message); - /** - * Update the specified comment's subject field with the supplied value. - * - * @param id - * @param subject - */ - public void updateCommentSubject(String id, String subject); + /** + * Update the specified comment's subject field with the supplied value. + * + * @param id + * @param subject + */ + public void updateCommentSubject(String id, String subject); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java index cb2116884..209c6be14 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java @@ -23,47 +23,47 @@ public interface DeedDao { - /** - * Create a new deed to the host. - */ - DeedEntity insertDeed(OwnerEntity owner, HostInterface host); + /** + * Create a new deed to the host. + */ + DeedEntity insertDeed(OwnerEntity owner, HostInterface host); - /** - * Delete the given deed. Return true if a row was actually deleted, false if one was not. - * - * @param deed - * @return - */ - boolean deleteDeed(DeedEntity deed); + /** + * Delete the given deed. Return true if a row was actually deleted, false if one was not. + * + * @param deed + * @return + */ + boolean deleteDeed(DeedEntity deed); - /** - * Delete the given deed. Return true if a row was actually deleted, false if one was not. - * - * @param deed - * @return - */ - boolean deleteDeed(HostInterface host); + /** + * Delete the given deed. Return true if a row was actually deleted, false if one was not. + * + * @param deed + * @return + */ + boolean deleteDeed(HostInterface host); - /** - * Return the deed by its given id. - * - * @param id - * @return - */ - DeedEntity getDeed(String id); + /** + * Return the deed by its given id. + * + * @param id + * @return + */ + DeedEntity getDeed(String id); - /** - * Return all deed's from the given owner. - * - * @param owner - * @return - */ - List getDeeds(OwnerEntity owner); + /** + * Return all deed's from the given owner. 
+ * + * @param owner + * @return + */ + List getDeeds(OwnerEntity owner); - /** - * - * - * @param owner - */ - void deleteDeeds(OwnerEntity owner); + /** + * + * + * @param owner + */ + void deleteDeeds(OwnerEntity owner); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java index de79d18cf..2cc91e1e1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DepartmentDao.java @@ -23,49 +23,49 @@ */ public interface DepartmentDao { - /** - * Finds a department by name. Department objects contain only a name and a unique ID. - * - * @param name - * @return Department - */ - public DepartmentInterface findDepartment(String name); + /** + * Finds a department by name. Department objects contain only a name and a unique ID. + * + * @param name + * @return Department + */ + public DepartmentInterface findDepartment(String name); - /** - * Finds a department by id. Department objects contain only a name and a unique ID. - * - * @param id - * @return Department - */ - public DepartmentInterface getDepartment(String id); + /** + * Finds a department by id. Department objects contain only a name and a unique ID. + * + * @param id + * @return Department + */ + public DepartmentInterface getDepartment(String id); - /** - * Returns the cue's default department. The default department is assigned to any job that falls - * within a group that doesn't have a department. Usually this is Unassigned. - * - * @return Department - */ - public DepartmentInterface getDefaultDepartment(); + /** + * Returns the cue's default department. The default department is assigned to any job that + * falls within a group that doesn't have a department. Usually this is Unassigned. + * + * @return Department + */ + public DepartmentInterface getDefaultDepartment(); - /** - * Returns true if the department exists - * - * @param name - * @return - */ - public boolean departmentExists(String name); + /** + * Returns true if the department exists + * + * @param name + * @return + */ + public boolean departmentExists(String name); - /** - * Inserts a new department record. Departments are only a name and a unique ID. - * - * @param name - */ - public void insertDepartment(String name); + /** + * Inserts a new department record. Departments are only a name and a unique ID. + * + * @param name + */ + public void insertDepartment(String name); - /** - * Removes the specified department. - * - * @param d - */ - public void deleteDepartment(DepartmentInterface d); + /** + * Removes the specified department. 
+ * + * @param d + */ + public void deleteDepartment(DepartmentInterface d); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java index 053c20cf2..2c408479e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DependDao.java @@ -42,171 +42,172 @@ */ public interface DependDao { - /** - * Returns a LightweightDependency from its ID - * - * @param id - * @return LightweightDependency - */ - LightweightDependency getDepend(String id); - - /** - * Returns a LightweightDependency from its ID - * - * @param signature - * @return LightweightDependency - */ - LightweightDependency getDependBySignature(String s); - - /** - * Gets a list of LightweightDependenies that depend on the specified job - * - * @param job - * @return List - */ - List getWhatDependsOn(JobInterface job); - - /** - * Get a list of LightweightDependenies that depend on this job and are either intenral, external, - * or either. The depends returned can depend on any part of the job. - * - * @param job - * @param target - * @return - */ - List getWhatDependsOn(JobInterface job, DependTarget target); - - /** - * Gets a list of LightweightDependencies that depend on the specified layer - * - * @param job - * @param layer - * @return List - */ - List getWhatDependsOn(LayerInterface layer); - - /** - * Gets a list of LightweightDependencies that depend on the specified frame - * - * @param frame - * @return - */ - List getWhatDependsOn(FrameInterface frame); - - /** - * Deletes a dependency - * - * @param depend - */ - void deleteDepend(LightweightDependency depend); - - /** - * Returns a list of depends where the specified job is the depender. Passing a depend target will - * limit the results to either internal or external. This method returns active depends only. - * - * @param Job - * @param DependTarget - * @return List - */ - List getWhatThisDependsOn(JobInterface job, DependTarget target); - - /** - * Returns a list of depends the layer depends on. Passing in a depend target will limit the - * results to either internal, external or both. This method returns active depends only. - * - * @param Layer - * @return List - */ - List getWhatThisDependsOn(LayerInterface layer, DependTarget target); - - /** - * Returns a list of depends the frame depends on. Passing in a depend target will limit the - * results to either inernal, external, or both.This method returns active depends only. - * - * @param Frame - * @return List - */ - List getWhatThisDependsOn(FrameInterface frame, DependTarget target); - - /** - * Returns a list of dependencies where the supplied frame is the element being depended on. 
- * - * @param frame - * @param active - * @return - */ - List getWhatDependsOn(FrameInterface frame, boolean active); - - /** - * - * @param layer - * @param active - * @return - */ - List getWhatDependsOn(LayerInterface layer, boolean active); - - /** - * Returns a list of child FrameByFrame dependencies - * - * @param depend - * @return - */ - List getChildDepends(LightweightDependency depend); - - void insertDepend(JobOnJob d); - - void insertDepend(JobOnLayer d); - - void insertDepend(JobOnFrame d); - - void insertDepend(LayerOnJob d); - - void insertDepend(LayerOnLayer d); - - void insertDepend(LayerOnFrame d); - - void insertDepend(FrameOnJob d); - - void insertDepend(FrameOnLayer d); - - void insertDepend(FrameByFrame d); - - void insertDepend(FrameOnFrame d); - - void insertDepend(PreviousFrame d); - - void updateFrameState(FrameInterface f); - - /** - * Increment the depend count for the specified frame. - * - * @param f - * @throws DependException if the depend count was not incremented. - */ - void incrementDependCount(FrameInterface f); - - /** - * Decrement the depend count for the specified frame. Return false if the depend count is already - * 0, true if the depend count was decremented. - * - * @param f - */ - boolean decrementDependCount(FrameInterface f); - - /** - * Returns true if this is the thread that set the depend to inactive. - * - * @param depend - * @return - */ - boolean setInactive(LightweightDependency depend); - - /** - * Sets a dependency as active. If the dependency is already active return false, otherwise return - * true. Currently this only works on FrameOnFrame and LayerOnLayer. - * - * @param depend - * @return true if this thread actually updated the row. - */ - boolean setActive(LightweightDependency depend); + /** + * Returns a LightweightDependency from its ID + * + * @param id + * @return LightweightDependency + */ + LightweightDependency getDepend(String id); + + /** + * Returns a LightweightDependency from its ID + * + * @param signature + * @return LightweightDependency + */ + LightweightDependency getDependBySignature(String s); + + /** + * Gets a list of LightweightDependenies that depend on the specified job + * + * @param job + * @return List + */ + List getWhatDependsOn(JobInterface job); + + /** + * Get a list of LightweightDependenies that depend on this job and are either intenral, + * external, or either. The depends returned can depend on any part of the job. + * + * @param job + * @param target + * @return + */ + List getWhatDependsOn(JobInterface job, DependTarget target); + + /** + * Gets a list of LightweightDependencies that depend on the specified layer + * + * @param job + * @param layer + * @return List + */ + List getWhatDependsOn(LayerInterface layer); + + /** + * Gets a list of LightweightDependencies that depend on the specified frame + * + * @param frame + * @return + */ + List getWhatDependsOn(FrameInterface frame); + + /** + * Deletes a dependency + * + * @param depend + */ + void deleteDepend(LightweightDependency depend); + + /** + * Returns a list of depends where the specified job is the depender. Passing a depend target + * will limit the results to either internal or external. This method returns active depends + * only. + * + * @param Job + * @param DependTarget + * @return List + */ + List getWhatThisDependsOn(JobInterface job, DependTarget target); + + /** + * Returns a list of depends the layer depends on. Passing in a depend target will limit the + * results to either internal, external or both. 
This method returns active depends only. + * + * @param Layer + * @return List + */ + List getWhatThisDependsOn(LayerInterface layer, DependTarget target); + + /** + * Returns a list of depends the frame depends on. Passing in a depend target will limit the + * results to either inernal, external, or both.This method returns active depends only. + * + * @param Frame + * @return List + */ + List getWhatThisDependsOn(FrameInterface frame, DependTarget target); + + /** + * Returns a list of dependencies where the supplied frame is the element being depended on. + * + * @param frame + * @param active + * @return + */ + List getWhatDependsOn(FrameInterface frame, boolean active); + + /** + * + * @param layer + * @param active + * @return + */ + List getWhatDependsOn(LayerInterface layer, boolean active); + + /** + * Returns a list of child FrameByFrame dependencies + * + * @param depend + * @return + */ + List getChildDepends(LightweightDependency depend); + + void insertDepend(JobOnJob d); + + void insertDepend(JobOnLayer d); + + void insertDepend(JobOnFrame d); + + void insertDepend(LayerOnJob d); + + void insertDepend(LayerOnLayer d); + + void insertDepend(LayerOnFrame d); + + void insertDepend(FrameOnJob d); + + void insertDepend(FrameOnLayer d); + + void insertDepend(FrameByFrame d); + + void insertDepend(FrameOnFrame d); + + void insertDepend(PreviousFrame d); + + void updateFrameState(FrameInterface f); + + /** + * Increment the depend count for the specified frame. + * + * @param f + * @throws DependException if the depend count was not incremented. + */ + void incrementDependCount(FrameInterface f); + + /** + * Decrement the depend count for the specified frame. Return false if the depend count is + * already 0, true if the depend count was decremented. + * + * @param f + */ + boolean decrementDependCount(FrameInterface f); + + /** + * Returns true if this is the thread that set the depend to inactive. + * + * @param depend + * @return + */ + boolean setInactive(LightweightDependency depend); + + /** + * Sets a dependency as active. If the dependency is already active return false, otherwise + * return true. Currently this only works on FrameOnFrame and LayerOnLayer. + * + * @param depend + * @return true if this thread actually updated the row. + */ + boolean setActive(LightweightDependency depend); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java index 1cf0932b0..b751a23ca 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java @@ -32,154 +32,155 @@ */ public interface DispatcherDao { - /** - * Finds the next frame on the specified job that can utilize the free resources on the host. - * - * @param host - * @param job - * @return - */ - DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host); - - /** - * Returns the next frame based on the supplied job - * - * @param job - * @param proc - * @return DispatchFrame - */ - DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc); - - /** - * Finds the next frame on the specified job that can utilize the free resources on the host. 
- * - * @param host - * @param job - * @return - */ - List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); - - /** - * Returns the next frame based on the supplied job - * - * @param job - * @param proc - * @return DispatchFrame - */ - List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); - - /** - * Return a list of jobs which could use resources of the specified host. It does not consider - * show priority. - * - * @param host - * @param numJobs - * @return - */ - Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); - - /** - * Return a list of jobs which could use resources of the specified host - * - * @param host - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, int numJobs); - - /** - * Return a list of jobs which could use resources of the specified host that are in the specified - * group. - * - * @param host - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, GroupInterface g); - - /** - * Finds an under proced job if one exists and returns it, otherwise it returns null. - * - * @param excludeJob - * @param proc - * @return - */ - boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); - - /** - * Returns true if there exists a higher priority job than the base job - * - * @param baseJob - * @param proc - * @return boolean - */ - boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); - - /** - * Dispatch the given host to the specified show. Look for a max of numJobs. - * - * @param host - * @param show - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); - - /** - * Find a list of local dispatch jobs. - * - * @param host - * @return - */ - Set findLocalDispatchJobs(DispatchHost host); - - /** - * Return a list of frames from the given layer. - * - * @param layer - * @param proc - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, int limit); - - /** - * Return a list of frames from the given layer. - * - * @param layer - * @param host - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); - - /** - * Return Scheduling Mode selected - * - * @return - */ - SchedulingMode getSchedulingMode(); - - /** - * Set Scheduling Mode. - * - * @param schedulingMode - */ - void setSchedulingMode(SchedulingMode schedulingMode); - - /** - * - PRIORITY_ONLY: Sort by priority only - FIFO: Whether or not to enable FIFO scheduling in the - * same priority. - BALANCED: Use a rank formula that takes into account time waiting, and number - * of cores required: rank = priority + (100 * (1 - (job.cores/job.int_min_cores))) + age in days - */ - enum SchedulingMode { - PRIORITY_ONLY, FIFO, BALANCED - } - - /** - * Clear bookableShows cache - * - * @return - */ - void clearCache(); + /** + * Finds the next frame on the specified job that can utilize the free resources on the host. + * + * @param host + * @param job + * @return + */ + DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host); + + /** + * Returns the next frame based on the supplied job + * + * @param job + * @param proc + * @return DispatchFrame + */ + DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc); + + /** + * Finds the next frame on the specified job that can utilize the free resources on the host. 
+ * + * @param host + * @param job + * @return + */ + List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); + + /** + * Returns the next frame based on the supplied job + * + * @param job + * @param proc + * @return DispatchFrame + */ + List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); + + /** + * Return a list of jobs which could use resources of the specified host. It does not consider + * show priority. + * + * @param host + * @param numJobs + * @return + */ + Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); + + /** + * Return a list of jobs which could use resources of the specified host + * + * @param host + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, int numJobs); + + /** + * Return a list of jobs which could use resources of the specified host that are in the + * specified group. + * + * @param host + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, GroupInterface g); + + /** + * Finds an under proced job if one exists and returns it, otherwise it returns null. + * + * @param excludeJob + * @param proc + * @return + */ + boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); + + /** + * Returns true if there exists a higher priority job than the base job + * + * @param baseJob + * @param proc + * @return boolean + */ + boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); + + /** + * Dispatch the given host to the specified show. Look for a max of numJobs. + * + * @param host + * @param show + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); + + /** + * Find a list of local dispatch jobs. + * + * @param host + * @return + */ + Set findLocalDispatchJobs(DispatchHost host); + + /** + * Return a list of frames from the given layer. + * + * @param layer + * @param proc + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, int limit); + + /** + * Return a list of frames from the given layer. + * + * @param layer + * @param host + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); + + /** + * Return Scheduling Mode selected + * + * @return + */ + SchedulingMode getSchedulingMode(); + + /** + * Set Scheduling Mode. + * + * @param schedulingMode + */ + void setSchedulingMode(SchedulingMode schedulingMode); + + /** + * - PRIORITY_ONLY: Sort by priority only - FIFO: Whether or not to enable FIFO scheduling in + * the same priority. 
- BALANCED: Use a rank formula that takes into account time waiting, and + * number of cores required: rank = priority + (100 * (1 - (job.cores/job.int_min_cores))) + age + * in days + */ + enum SchedulingMode { + PRIORITY_ONLY, FIFO, BALANCED + } + + /** + * Clear bookableShows cache + * + * @return + */ + void clearCache(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java index 0d269f672..23c268f83 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FacilityDao.java @@ -20,51 +20,51 @@ public interface FacilityDao { - /** - * Returns the default facility - * - * @return - */ - public FacilityInterface getDefaultFacility(); + /** + * Returns the default facility + * + * @return + */ + public FacilityInterface getDefaultFacility(); - /** - * Gets a facility by Id - * - * @param id - * @return - */ - public FacilityInterface getFacility(String id); + /** + * Gets a facility by Id + * + * @param id + * @return + */ + public FacilityInterface getFacility(String id); - /** - * Returns true if a facility exists - * - * @param name - * @return - */ - public boolean facilityExists(String name); + /** + * Returns true if a facility exists + * + * @param name + * @return + */ + public boolean facilityExists(String name); - /** - * Insert and return a facility. - * - * @param name - * @return - */ - public FacilityInterface insertFacility(FacilityEntity facility); + /** + * Insert and return a facility. + * + * @param name + * @return + */ + public FacilityInterface insertFacility(FacilityEntity facility); - /** - * Deletes a facility record, if possible. - * - * @param facility - * @return - */ - public int deleteFacility(FacilityInterface facility); + /** + * Deletes a facility record, if possible. + * + * @param facility + * @return + */ + public int deleteFacility(FacilityInterface facility); - /** - * Rename the specified facility. - * - * @param facility - * @param name - * @return - */ - int updateFacilityName(FacilityInterface facility, String name); + /** + * Rename the specified facility. 
+ * + * @param facility + * @param name + * @return + */ + int updateFacilityName(FacilityInterface facility, String name); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java index 8de1c83fe..baa27fcf0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FilterDao.java @@ -29,32 +29,32 @@ */ public interface FilterDao { - List getActiveFilters(ShowInterface show); + List getActiveFilters(ShowInterface show); - List getFilters(ShowInterface show); + List getFilters(ShowInterface show); - void updateSetFilterEnabled(FilterInterface f, boolean enabled); + void updateSetFilterEnabled(FilterInterface f, boolean enabled); - void updateSetFilterName(FilterInterface f, String name); + void updateSetFilterName(FilterInterface f, String name); - void updateSetFilterType(FilterInterface f, FilterType type); + void updateSetFilterType(FilterInterface f, FilterType type); - void updateSetFilterOrder(FilterInterface f, double order); + void updateSetFilterOrder(FilterInterface f, double order); - void deleteFilter(FilterInterface f); + void deleteFilter(FilterInterface f); - void insertFilter(FilterEntity f); + void insertFilter(FilterEntity f); - void reorderFilters(ShowInterface s); + void reorderFilters(ShowInterface s); - void lowerFilterOrder(FilterInterface f, int by); + void lowerFilterOrder(FilterInterface f, int by); - void raiseFilterOrder(FilterInterface f, int by); + void raiseFilterOrder(FilterInterface f, int by); - FilterEntity getFilter(String id); + FilterEntity getFilter(String id); - FilterEntity getFilter(FilterInterface filter); + FilterEntity getFilter(FilterInterface filter); - FilterEntity findFilter(ShowInterface show, String name); + FilterEntity findFilter(ShowInterface show, String name); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java index 20ce02fbf..bad12e35a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java @@ -35,357 +35,358 @@ public interface FrameDao { - /** - * finds the frame in the job that used the lowest amount of memory - * - * @param job - * @return - */ - public FrameDetail findLowestMemoryFrame(JobInterface job); - - /** - * finds the frame in the job that used the highest amount of memory, - * - * @param job - * @return - */ - public FrameDetail findHighestMemoryFrame(JobInterface job); - - /** - * Returns the data for the shortest succeeded frame. - * - * @param job - * @return - */ - public FrameDetail findShortestFrame(JobInterface job); - - /** - * Returns the data for the longest succeeded frame. - * - * @param job - * @return - */ - public FrameDetail findLongestFrame(JobInterface job); - - /** - * Checks to see how many retries a frame has. If that number is greater than or equal to the jobs - * max retries, the frame is marked as dead. - * - * @param frame - */ - void checkRetries(FrameInterface frame); - - /** - * Batch inserts a frameSet of frames. - * - * @param frame - */ - void insertFrames(LayerDetail layer, List frames); - - /** - * Retrieve a FrameDetail from something that implements Frame - * - * @param frame - * @return FrameDetail - */ - FrameDetail getFrameDetail(FrameInterface frame); - - /** - * Retrieve a FrameDetail from its unique ID. 
- * - * @param id - * @return FrameDetail - */ - FrameDetail getFrameDetail(String id); - - /** - * - * @param job - * @param name - * @return - */ - FrameDetail findFrameDetail(JobInterface job, String name); - - /** - * Returns a minimal Frame from its ID - * - * @param id - * @return Frame - */ - FrameInterface getFrame(String id); - - /** - * Finds a minimal frame from its job and frame name - * - * @param job - * @param name - * @return Frame - */ - FrameInterface findFrame(JobInterface job, String name); - - /** - * Finds a minimal frame from its layer and number. - * - * @param job - * @param name - * @return Frame - */ - FrameInterface findFrame(LayerInterface layer, int number); - - /** - * Find a list of minimal frames from a job and FrameLookupRequest. - * - * @param job - * @param r - * @return List - */ - List findFrames(FrameSearchInterface r); - - /** - * Find a list of FrameDetail objects from a job and FrameLookupRequest. - * - * @param job - * @param r - * @return List - */ - List findFrameDetails(FrameSearchInterface r); - - /** - * Updates the specified frame's state. - * - * @param frame - * @param state - */ - boolean updateFrameState(FrameInterface frame, FrameState state); - - /** - * Updates a frame to indicate its now running. - * - * @param proc - * @param frame - * @return - */ - void updateFrameStarted(VirtualProc proc, FrameInterface frame); - - /** - * Updates a frame to the stopped state. The frame MUST be in the Running state to be stopped. - * - * @param proc - * @param frame - * @param report - */ - boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus); - - /** - * Updates a frame to the stopped state. The frame MUST be in the Running state to be stopped. - * - * @param frame - * @param state - * @param exitStatus - * @param maxRss - * @return - */ - boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, long maxRss); - - /** - * Sets a frame to an unreserved waiting state. - * - * @param frame - * @return - */ - boolean updateFrameCleared(FrameInterface frame); - - /** - * Sets a frame exitStatus to EXIT_STATUS_MEMORY_FAILURE - * - * @param frame - * @return whether the frame has been updated - */ - boolean updateFrameMemoryError(FrameInterface frame); - - /** - * Sets a frame to an unreserved waiting state. - * - * @param frame - * @return - */ - boolean updateFrameHostDown(FrameInterface frame); - - /** - * Returns a DispatchFrame object from the frame's uinique ID. - * - * @param uuid - * @return DispatchFrame - */ - DispatchFrame getDispatchFrame(String uuid); - - /** - * Set the specified frame to the Waiting state and its depend count to 0. - * - * @param frame - */ - void markFrameAsWaiting(FrameInterface frame); - - /** - * If the specified frame has active dependencies, reset the dependency count and set the frame - * state to Depend - * - * @param frame - */ - void markFrameAsDepend(FrameInterface frame); - - /** - * Reverses the specified frame range. The revese layer implementation is is more intensive than - * other reorder operations because we look up the dispatch order for each frame and then switch - * them. - * - * @param layer - * @param frameSet - */ - public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet); - - /** - * - * Reorders specified frames to the end of the dispatch order. This works by finding the frame - * with the highest dispatch value, and updating the specified frames with higher values. The rest - * of the frames in the layer are not touched. 
- * - * @param layer - * @param frameSet - */ - public void reorderFramesLast(LayerInterface layer, FrameSet frameSet); - - /** - * Reorders specified frames to the top of the dispatch order. This works by finding the frame - * with the lowest dispatch order and updating targeted frames with even lower dispatcher orders, - * negative numbers are allowed. - * - * @param layer - * @param frameSet - */ - public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet); - - /** - * This would reorder frames so that it would render the specified sequence on a staggered frame - * range. The frame set must be a staggered range. - * - * @param layer - * @param frameSet - */ - public void staggerLayer(LayerInterface layer, String range, int stagger); - - /** - * Returns a list of Running frames that have not had a proc assigned to them in over 5 min. This - * can happen when an operation aborts due to a deadlock. - * - * @return - */ - List getOrphanedFrames(); - - /** - * Return a list of all frames that have positive dependency counts for the specified dependency. - * - * @param depend - * @return - */ - List getDependentFrames(LightweightDependency depend); - - /** - * Returns true if the frame is succeeded. - * - * @param f - * @return - */ - public boolean isFrameComplete(FrameInterface f); - - /** - * Attempts to fix the case where a proc is assigned to a frame but the frame is in the waiting - * state. - * - * @param proc - * @param frame - * @return - */ - boolean updateFrameFixed(VirtualProc proc, FrameInterface frame); - - /** - * Return a ResourceUsage object which repesents the amount of clock and core time the frame has - * used up until this point. - * - * @param f - * @return - */ - ResourceUsage getResourceUsage(FrameInterface f); - - /** - * Update memory usage values and LLU time for the given frame. The frame must be in the Running - * state. If the frame is locked by another thread, the process is aborted because we'll most - * likely get a new update one minute later. - * - * @param f - * @param maxRss - * @param rss - * @param lluTime - * @throws FrameReservationException if the frame is locked by another thread. - */ - void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, long lluTime); - - /** - * Attempt to put a exclusive row lock on the given frame. The frame must be in the specified - * state. - * - * @param frame - * @param state - * @throws FrameReservationException if the frame changes state before the lock can be applied. - */ - void lockFrameForUpdate(FrameInterface frame, FrameState state); - - /** - * Return true if the specified frame is an orphan. - * - * @param frame - * @return - */ - boolean isOrphan(FrameInterface frame); - - /** - * Update a frame's checkpoint state status. - * - * @param frame - * @param state - * @return - */ - boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state); - - /** - * Return a list of checkpoints that have failed to report back in within a certain cutoff time. - * - * @param cutoffTime - * @return - */ - List getStaleCheckpoints(int cutoffTimeMs); - - /** - * Create a frame state display override. 
- * - * @param frameId String - * @param override FrameStateDisplayOverride - */ - void setFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override); - - /** - * Get the frame overrides for a specific frame - * - * @param frameId - * @return List - */ - FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId); - - /** - * Update a frame override with new text/color - * - * @param frameId - * @param override FrameStateDisplayOverride - */ - void updateFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override); + /** + * finds the frame in the job that used the lowest amount of memory + * + * @param job + * @return + */ + public FrameDetail findLowestMemoryFrame(JobInterface job); + + /** + * finds the frame in the job that used the highest amount of memory, + * + * @param job + * @return + */ + public FrameDetail findHighestMemoryFrame(JobInterface job); + + /** + * Returns the data for the shortest succeeded frame. + * + * @param job + * @return + */ + public FrameDetail findShortestFrame(JobInterface job); + + /** + * Returns the data for the longest succeeded frame. + * + * @param job + * @return + */ + public FrameDetail findLongestFrame(JobInterface job); + + /** + * Checks to see how many retries a frame has. If that number is greater than or equal to the + * jobs max retries, the frame is marked as dead. + * + * @param frame + */ + void checkRetries(FrameInterface frame); + + /** + * Batch inserts a frameSet of frames. + * + * @param frame + */ + void insertFrames(LayerDetail layer, List frames); + + /** + * Retrieve a FrameDetail from something that implements Frame + * + * @param frame + * @return FrameDetail + */ + FrameDetail getFrameDetail(FrameInterface frame); + + /** + * Retrieve a FrameDetail from its unique ID. + * + * @param id + * @return FrameDetail + */ + FrameDetail getFrameDetail(String id); + + /** + * + * @param job + * @param name + * @return + */ + FrameDetail findFrameDetail(JobInterface job, String name); + + /** + * Returns a minimal Frame from its ID + * + * @param id + * @return Frame + */ + FrameInterface getFrame(String id); + + /** + * Finds a minimal frame from its job and frame name + * + * @param job + * @param name + * @return Frame + */ + FrameInterface findFrame(JobInterface job, String name); + + /** + * Finds a minimal frame from its layer and number. + * + * @param job + * @param name + * @return Frame + */ + FrameInterface findFrame(LayerInterface layer, int number); + + /** + * Find a list of minimal frames from a job and FrameLookupRequest. + * + * @param job + * @param r + * @return List + */ + List findFrames(FrameSearchInterface r); + + /** + * Find a list of FrameDetail objects from a job and FrameLookupRequest. + * + * @param job + * @param r + * @return List + */ + List findFrameDetails(FrameSearchInterface r); + + /** + * Updates the specified frame's state. + * + * @param frame + * @param state + */ + boolean updateFrameState(FrameInterface frame, FrameState state); + + /** + * Updates a frame to indicate its now running. + * + * @param proc + * @param frame + * @return + */ + void updateFrameStarted(VirtualProc proc, FrameInterface frame); + + /** + * Updates a frame to the stopped state. The frame MUST be in the Running state to be stopped. + * + * @param proc + * @param frame + * @param report + */ + boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus); + + /** + * Updates a frame to the stopped state. 
The frame MUST be in the Running state to be stopped. + * + * @param frame + * @param state + * @param exitStatus + * @param maxRss + * @return + */ + boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, long maxRss); + + /** + * Sets a frame to an unreserved waiting state. + * + * @param frame + * @return + */ + boolean updateFrameCleared(FrameInterface frame); + + /** + * Sets a frame exitStatus to EXIT_STATUS_MEMORY_FAILURE + * + * @param frame + * @return whether the frame has been updated + */ + boolean updateFrameMemoryError(FrameInterface frame); + + /** + * Sets a frame to an unreserved waiting state. + * + * @param frame + * @return + */ + boolean updateFrameHostDown(FrameInterface frame); + + /** + * Returns a DispatchFrame object from the frame's uinique ID. + * + * @param uuid + * @return DispatchFrame + */ + DispatchFrame getDispatchFrame(String uuid); + + /** + * Set the specified frame to the Waiting state and its depend count to 0. + * + * @param frame + */ + void markFrameAsWaiting(FrameInterface frame); + + /** + * If the specified frame has active dependencies, reset the dependency count and set the frame + * state to Depend + * + * @param frame + */ + void markFrameAsDepend(FrameInterface frame); + + /** + * Reverses the specified frame range. The revese layer implementation is is more intensive than + * other reorder operations because we look up the dispatch order for each frame and then switch + * them. + * + * @param layer + * @param frameSet + */ + public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet); + + /** + * + * Reorders specified frames to the end of the dispatch order. This works by finding the frame + * with the highest dispatch value, and updating the specified frames with higher values. The + * rest of the frames in the layer are not touched. + * + * @param layer + * @param frameSet + */ + public void reorderFramesLast(LayerInterface layer, FrameSet frameSet); + + /** + * Reorders specified frames to the top of the dispatch order. This works by finding the frame + * with the lowest dispatch order and updating targeted frames with even lower dispatcher + * orders, negative numbers are allowed. + * + * @param layer + * @param frameSet + */ + public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet); + + /** + * This would reorder frames so that it would render the specified sequence on a staggered frame + * range. The frame set must be a staggered range. + * + * @param layer + * @param frameSet + */ + public void staggerLayer(LayerInterface layer, String range, int stagger); + + /** + * Returns a list of Running frames that have not had a proc assigned to them in over 5 min. + * This can happen when an operation aborts due to a deadlock. + * + * @return + */ + List getOrphanedFrames(); + + /** + * Return a list of all frames that have positive dependency counts for the specified + * dependency. + * + * @param depend + * @return + */ + List getDependentFrames(LightweightDependency depend); + + /** + * Returns true if the frame is succeeded. + * + * @param f + * @return + */ + public boolean isFrameComplete(FrameInterface f); + + /** + * Attempts to fix the case where a proc is assigned to a frame but the frame is in the waiting + * state. + * + * @param proc + * @param frame + * @return + */ + boolean updateFrameFixed(VirtualProc proc, FrameInterface frame); + + /** + * Return a ResourceUsage object which repesents the amount of clock and core time the frame has + * used up until this point. 
+ * + * @param f + * @return + */ + ResourceUsage getResourceUsage(FrameInterface f); + + /** + * Update memory usage values and LLU time for the given frame. The frame must be in the Running + * state. If the frame is locked by another thread, the process is aborted because we'll most + * likely get a new update one minute later. + * + * @param f + * @param maxRss + * @param rss + * @param lluTime + * @throws FrameReservationException if the frame is locked by another thread. + */ + void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, long lluTime); + + /** + * Attempt to put a exclusive row lock on the given frame. The frame must be in the specified + * state. + * + * @param frame + * @param state + * @throws FrameReservationException if the frame changes state before the lock can be applied. + */ + void lockFrameForUpdate(FrameInterface frame, FrameState state); + + /** + * Return true if the specified frame is an orphan. + * + * @param frame + * @return + */ + boolean isOrphan(FrameInterface frame); + + /** + * Update a frame's checkpoint state status. + * + * @param frame + * @param state + * @return + */ + boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state); + + /** + * Return a list of checkpoints that have failed to report back in within a certain cutoff time. + * + * @param cutoffTime + * @return + */ + List getStaleCheckpoints(int cutoffTimeMs); + + /** + * Create a frame state display override. + * + * @param frameId String + * @param override FrameStateDisplayOverride + */ + void setFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override); + + /** + * Get the frame overrides for a specific frame + * + * @param frameId + * @return List + */ + FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId); + + /** + * Update a frame override with new text/color + * + * @param frameId + * @param override FrameStateDisplayOverride + */ + void updateFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java index cedae2516..be6f0a5e4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java @@ -30,205 +30,205 @@ */ public interface GroupDao { - /** - * returns the group from its unique id - * - * @param id - * @return - */ - GroupInterface getGroup(String id); - - /** - * returns a list of groups using their unique ids - * - * @param id - * @return - */ - List getGroups(List id); - - /** - * - * @param show - * @return - */ - GroupDetail getRootGroupDetail(ShowInterface show); - - /** - * Returns the show's root group. - * - * @param show - * @return - */ - String getRootGroupId(ShowInterface show); - - /** - * Insert group into specified parent - * - * @param group - */ - void insertGroup(GroupDetail group, GroupInterface parent); - - /** - * - * @param group - */ - void insertGroup(GroupDetail group); - - /** - * Updates the groups department. - * - * @param group - * @param dept - */ - void updateDepartment(GroupInterface group, DepartmentInterface dept); - - /** - * Removes the specified group. You cannot delete a group that contains jobs or other groups or - * the shows root folder. - * - * @param group - */ - void deleteGroup(GroupInterface group); - - /** - * Sets the group's new parent. Triggers will handle any recursive level changes. 
- * - * @param group - * @param parent - * - * @throws EntityModificationError throws this if the group is the top level group which cannot be - * parented to another group. - */ - void updateGroupParent(GroupInterface group, GroupInterface parent); - - /** - * Sets the maximum number of procs the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMaxCores(GroupInterface group, int value); - - /** - * Sets the minimum number of procs the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMinCores(GroupInterface group, int value); - - /** - * Sets the maximum number of cores for this group - * - * @param group - * @param value - */ - public void updateMaxCores(GroupInterface group, int value); - - /** - * Set the minimum number of cores for this group - * - * @param group - * @param value - */ - - public void updateMinCores(GroupInterface group, int value); - - /** - * Sets the maximum number of gpus the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMaxGpus(GroupInterface group, int value); - - /** - * Sets the minimum number of gpus the group should be running. - * - * @param group - * @param value - */ - void updateDefaultJobMinGpus(GroupInterface group, int value); - - /** - * Sets the maximum number of gpus for this group - * - * @param group - * @param value - */ - public void updateMaxGpus(GroupInterface group, int value); - - /** - * Set the minimum number of gpus for this group - * - * @param group - * @param value - */ - - public void updateMinGpus(GroupInterface group, int value); - - /** - * Renames the group - * - * @param group - * @param value - */ - void updateName(GroupInterface group, String value); - - /** - * Updates a group's priority. - * - * @param group - * @param value - */ - void updateDefaultJobPriority(GroupInterface group, int value); - - /** - * Returns a full GroupDetail object from its unique id - * - * @param id - * @return - */ - GroupDetail getGroupDetail(String id); - - /** - * Returns a recursive list of a group's children - * - * @param group - * @return - */ - List getChildrenRecursive(GroupInterface group); - - /** - * - * Returns a list of a groups immediate children - * - * @param group - * @return - */ - List getChildren(GroupInterface group); - - /** - * Returns true if the group of the specified job is at or over its min proc - * - * @param job - * @return - */ - boolean isOverMinCores(JobInterface job); - - /** - * Returns true if the group is managed. - * - * @param group - * @return - */ - boolean isManaged(GroupInterface group); - - /** - * Return a GroupDetail for the specified job. - * - * @param job - * @return - */ - GroupDetail getGroupDetail(JobInterface job); + /** + * returns the group from its unique id + * + * @param id + * @return + */ + GroupInterface getGroup(String id); + + /** + * returns a list of groups using their unique ids + * + * @param id + * @return + */ + List getGroups(List id); + + /** + * + * @param show + * @return + */ + GroupDetail getRootGroupDetail(ShowInterface show); + + /** + * Returns the show's root group. + * + * @param show + * @return + */ + String getRootGroupId(ShowInterface show); + + /** + * Insert group into specified parent + * + * @param group + */ + void insertGroup(GroupDetail group, GroupInterface parent); + + /** + * + * @param group + */ + void insertGroup(GroupDetail group); + + /** + * Updates the groups department. 
+ * + * @param group + * @param dept + */ + void updateDepartment(GroupInterface group, DepartmentInterface dept); + + /** + * Removes the specified group. You cannot delete a group that contains jobs or other groups or + * the shows root folder. + * + * @param group + */ + void deleteGroup(GroupInterface group); + + /** + * Sets the group's new parent. Triggers will handle any recursive level changes. + * + * @param group + * @param parent + * + * @throws EntityModificationError throws this if the group is the top level group which cannot + * be parented to another group. + */ + void updateGroupParent(GroupInterface group, GroupInterface parent); + + /** + * Sets the maximum number of procs the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMaxCores(GroupInterface group, int value); + + /** + * Sets the minimum number of procs the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMinCores(GroupInterface group, int value); + + /** + * Sets the maximum number of cores for this group + * + * @param group + * @param value + */ + public void updateMaxCores(GroupInterface group, int value); + + /** + * Set the minimum number of cores for this group + * + * @param group + * @param value + */ + + public void updateMinCores(GroupInterface group, int value); + + /** + * Sets the maximum number of gpus the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMaxGpus(GroupInterface group, int value); + + /** + * Sets the minimum number of gpus the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMinGpus(GroupInterface group, int value); + + /** + * Sets the maximum number of gpus for this group + * + * @param group + * @param value + */ + public void updateMaxGpus(GroupInterface group, int value); + + /** + * Set the minimum number of gpus for this group + * + * @param group + * @param value + */ + + public void updateMinGpus(GroupInterface group, int value); + + /** + * Renames the group + * + * @param group + * @param value + */ + void updateName(GroupInterface group, String value); + + /** + * Updates a group's priority. + * + * @param group + * @param value + */ + void updateDefaultJobPriority(GroupInterface group, int value); + + /** + * Returns a full GroupDetail object from its unique id + * + * @param id + * @return + */ + GroupDetail getGroupDetail(String id); + + /** + * Returns a recursive list of a group's children + * + * @param group + * @return + */ + List getChildrenRecursive(GroupInterface group); + + /** + * + * Returns a list of a groups immediate children + * + * @param group + * @return + */ + List getChildren(GroupInterface group); + + /** + * Returns true if the group of the specified job is at or over its min proc + * + * @param job + * @return + */ + boolean isOverMinCores(JobInterface job); + + /** + * Returns true if the group is managed. + * + * @param group + * @return + */ + boolean isManaged(GroupInterface group); + + /** + * Return a GroupDetail for the specified job. 
+ * + * @param job + * @return + */ + GroupDetail getGroupDetail(JobInterface job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java index 907ca6ab1..17f568829 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/HistoricalDao.java @@ -21,19 +21,19 @@ public interface HistoricalDao { - /** - * Return all jobs that have been finished longer than the specified cut off in hours. - * - * @param cutoffHours - * @return - */ - List getFinishedJobs(int cutoffHours); - - /** - * Transfer a job from the live tables to the historical tables. - * - * @param job - */ - void transferJob(JobInterface job); + /** + * Return all jobs that have been finished longer than the specified cut off in hours. + * + * @param cutoffHours + * @return + */ + List getFinishedJobs(int cutoffHours); + + /** + * Transfer a job from the live tables to the historical tables. + * + * @param job + */ + void transferJob(JobInterface job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java index 9af9bd8ca..dfd0397a2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java @@ -35,288 +35,289 @@ */ public interface HostDao { - /** - * Attempt to obtain an exclusive lock on the host. If another thread alrady has the host locked, - * a ResourceReservationFailureException is thrown. - * - * @param host HostInterface - * @throws ResourceReservationFailureException when an exclusive lock cannot be made. - */ - void lockForUpdate(HostInterface host); - - /** - * returns true if the specified host id is locked - * - * @param host HostInterface - * @return Boolean - */ - boolean isHostLocked(HostInterface host); - - /** - * deletes the passed host - * - * @param host HostInterface object to delete - */ - void deleteHost(HostInterface host); - - /** - * deletes the down state hosts - */ - void deleteDownHosts(); - - /** - * updates a host with the passed hardware state - * - * @param host HostInterface - * @param state HardwareState - */ - void updateHostState(HostInterface host, HardwareState state); - - /** - * updates a host with the passed free temporary directory - * - * @param host - * @param freeTempDir - */ - void updateHostFreeTempDir(HostInterface host, Long freeTempDir); - - /** - * returns a full host detail - * - * @param host HostInterface - * @return HostDetail - */ - HostEntity getHostDetail(HostInterface host); - - /** - * returns full host detail - * - * @param id String - * @return HostEntity - */ - HostEntity getHostDetail(String id); - - /** - * returns full host detail - * - * @param name String - * @return HostEntity - */ - HostEntity findHostDetail(String name); - - /** - * Return a DispatchHost object from its unique host name - * - * @param fqdn String - * @return DispatchHost - */ - DispatchHost findDispatchHost(String fqdn); - - /** - * Return a dispatch host object by id - * - * @param id String - * @return DispatchHost - */ - DispatchHost getDispatchHost(String id); - - /** - * Returns a host object by name - * - * @param name String - * @return HostInterface - */ - HostInterface findHost(String name); - - /** - * Returns a host object by ID. 
- * - * @param id String - * @return HostInterface - */ - HostInterface getHost(String id); - - /** - * Return the host involved with the given LocalJobAssignment. - * - * @param l LocalHostAssignment - * @return HostInterface - */ - HostInterface getHost(LocalHostAssignment l); - - /** - * Inserts a render host and its supporting procs into an allocation. - * - * @param report RenderHost - * @param a AllocationInterface - * @param useLongNames boolean - */ - void insertRenderHost(RenderHost report, AllocationInterface a, boolean useLongNames); - - /** - * Checks to see if a render host exists by name and returns true if it does, false if it doesn't. - * - * @param hostname String - * @return boolean - */ - boolean hostExists(String hostname); - - /** - * Updates the host's lock state. Open, Locked, NimbyLocked. Records the source of the lock. - * - * @param host HostInterface - * @param state LockState - * @param source Source - */ - void updateHostLock(HostInterface host, LockState state, Source source); - - /** - * Sets the reboot when idle boolean to true or false. If true the cue will issue the reboot - * command to hosts that ping in idle then set the flag back to false. - * - * @param host HostInterface - * @param enabled boolean - */ - void updateHostRebootWhenIdle(HostInterface host, boolean enabled); - - /** - * Updates a host's allocation - * - * @param host HostInterface - * @param alloc AllocationInterface - */ - void updateHostSetAllocation(HostInterface host, AllocationInterface alloc); - - /** - * - * @param id String - * @param tag String - * @param type HostTagType - */ - void tagHost(String id, String tag, HostTagType type); - - /** - * - * @param host HostInterface - * @param tag String - * @param type HostTagType - */ - void tagHost(HostInterface host, String tag, HostTagType type); - - /** - * - * @param host HostInterface - * @param type HostTagType - */ - void removeTagsByType(HostInterface host, HostTagType type); - - /** - * removes a tag - * - * @param host HostInterface - * @param tag String - */ - void removeTag(HostInterface host, String tag); - - /** - * renames a tag from oldTag to newTag - * - * @param host HostInterface - * @param oldTag String - * @param newTag String - */ - void renameTag(HostInterface host, String oldTag, String newTag); - - /** - * You must run this AFTER you've changed any type of job tags. The reason this is not a trigger - * or something of that nature is because is an intense process. - * - * @param id String - */ - void recalcuateTags(final String id); - - /** - * - * @param host HostInterface - * @param mode ThreadMode - */ - void updateThreadMode(HostInterface host, ThreadMode mode); - - /** - * Update the specified host's hardware information. - * - * @param host HostInterface - * @param totalMemory long - * @param freeMemory long - * @param totalSwap long - * @param freeSwap long - * @param totalMcp long - * @param freeMcp long - * @param totalGpuMemory long - * @param freeGpuMemory long - * @param load int - * @param os String - */ - void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, - long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, int load, - Timestamp bootTime, String os); - - /** - * Return true if the HardwareState is Up, false if it is anything else. - * - * @param host HostInterface - * @return boolean - */ - boolean isHostUp(HostInterface host); - - /** - * Return the number of whole stranded cores on this host. 
The must have less than - * Dispacher.MEM_STRANDED_THRESHHOLD for the cores to be considered stranded. - * - * @param h HostInterface - * @return int - */ - int getStrandedCoreUnits(HostInterface h); - - /** - * Return the number of whole stranded gpus on this host. The must have less than - * Dispacher.MEM_STRANDED_THRESHHOLD for the gpus to be considered stranded. - * - * @param h HostInterface - * @return int - */ - int getStrandedGpus(HostInterface h); - - /** - * Return true if the host is preferring a particular show. - * - * @param h HostInterface - * @return boolean - */ - boolean isPreferShow(HostInterface h); - - /** - * Return true if the host is a NIMBY host. - * - * @param h HostInterface - * @return boolean - */ - boolean isNimbyHost(HostInterface h); - - /** - * Update the host's operating system setting. - * - * @param host HostInterface - * @param os String - */ - void updateHostOs(HostInterface host, String os); - - /** - * Update a host's resource pool using the latest host report. - * - * @param host HostInterface - * @param report HostReport - */ - void updateHostResources(HostInterface host, HostReport report); + /** + * Attempt to obtain an exclusive lock on the host. If another thread alrady has the host + * locked, a ResourceReservationFailureException is thrown. + * + * @param host HostInterface + * @throws ResourceReservationFailureException when an exclusive lock cannot be made. + */ + void lockForUpdate(HostInterface host); + + /** + * returns true if the specified host id is locked + * + * @param host HostInterface + * @return Boolean + */ + boolean isHostLocked(HostInterface host); + + /** + * deletes the passed host + * + * @param host HostInterface object to delete + */ + void deleteHost(HostInterface host); + + /** + * deletes the down state hosts + */ + void deleteDownHosts(); + + /** + * updates a host with the passed hardware state + * + * @param host HostInterface + * @param state HardwareState + */ + void updateHostState(HostInterface host, HardwareState state); + + /** + * updates a host with the passed free temporary directory + * + * @param host + * @param freeTempDir + */ + void updateHostFreeTempDir(HostInterface host, Long freeTempDir); + + /** + * returns a full host detail + * + * @param host HostInterface + * @return HostDetail + */ + HostEntity getHostDetail(HostInterface host); + + /** + * returns full host detail + * + * @param id String + * @return HostEntity + */ + HostEntity getHostDetail(String id); + + /** + * returns full host detail + * + * @param name String + * @return HostEntity + */ + HostEntity findHostDetail(String name); + + /** + * Return a DispatchHost object from its unique host name + * + * @param fqdn String + * @return DispatchHost + */ + DispatchHost findDispatchHost(String fqdn); + + /** + * Return a dispatch host object by id + * + * @param id String + * @return DispatchHost + */ + DispatchHost getDispatchHost(String id); + + /** + * Returns a host object by name + * + * @param name String + * @return HostInterface + */ + HostInterface findHost(String name); + + /** + * Returns a host object by ID. + * + * @param id String + * @return HostInterface + */ + HostInterface getHost(String id); + + /** + * Return the host involved with the given LocalJobAssignment. + * + * @param l LocalHostAssignment + * @return HostInterface + */ + HostInterface getHost(LocalHostAssignment l); + + /** + * Inserts a render host and its supporting procs into an allocation. 
+ * + * @param report RenderHost + * @param a AllocationInterface + * @param useLongNames boolean + */ + void insertRenderHost(RenderHost report, AllocationInterface a, boolean useLongNames); + + /** + * Checks to see if a render host exists by name and returns true if it does, false if it + * doesn't. + * + * @param hostname String + * @return boolean + */ + boolean hostExists(String hostname); + + /** + * Updates the host's lock state. Open, Locked, NimbyLocked. Records the source of the lock. + * + * @param host HostInterface + * @param state LockState + * @param source Source + */ + void updateHostLock(HostInterface host, LockState state, Source source); + + /** + * Sets the reboot when idle boolean to true or false. If true the cue will issue the reboot + * command to hosts that ping in idle then set the flag back to false. + * + * @param host HostInterface + * @param enabled boolean + */ + void updateHostRebootWhenIdle(HostInterface host, boolean enabled); + + /** + * Updates a host's allocation + * + * @param host HostInterface + * @param alloc AllocationInterface + */ + void updateHostSetAllocation(HostInterface host, AllocationInterface alloc); + + /** + * + * @param id String + * @param tag String + * @param type HostTagType + */ + void tagHost(String id, String tag, HostTagType type); + + /** + * + * @param host HostInterface + * @param tag String + * @param type HostTagType + */ + void tagHost(HostInterface host, String tag, HostTagType type); + + /** + * + * @param host HostInterface + * @param type HostTagType + */ + void removeTagsByType(HostInterface host, HostTagType type); + + /** + * removes a tag + * + * @param host HostInterface + * @param tag String + */ + void removeTag(HostInterface host, String tag); + + /** + * renames a tag from oldTag to newTag + * + * @param host HostInterface + * @param oldTag String + * @param newTag String + */ + void renameTag(HostInterface host, String oldTag, String newTag); + + /** + * You must run this AFTER you've changed any type of job tags. The reason this is not a trigger + * or something of that nature is because is an intense process. + * + * @param id String + */ + void recalcuateTags(final String id); + + /** + * + * @param host HostInterface + * @param mode ThreadMode + */ + void updateThreadMode(HostInterface host, ThreadMode mode); + + /** + * Update the specified host's hardware information. + * + * @param host HostInterface + * @param totalMemory long + * @param freeMemory long + * @param totalSwap long + * @param freeSwap long + * @param totalMcp long + * @param freeMcp long + * @param totalGpuMemory long + * @param freeGpuMemory long + * @param load int + * @param os String + */ + void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, + long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, + int load, Timestamp bootTime, String os); + + /** + * Return true if the HardwareState is Up, false if it is anything else. + * + * @param host HostInterface + * @return boolean + */ + boolean isHostUp(HostInterface host); + + /** + * Return the number of whole stranded cores on this host. The must have less than + * Dispacher.MEM_STRANDED_THRESHHOLD for the cores to be considered stranded. + * + * @param h HostInterface + * @return int + */ + int getStrandedCoreUnits(HostInterface h); + + /** + * Return the number of whole stranded gpus on this host. The must have less than + * Dispacher.MEM_STRANDED_THRESHHOLD for the gpus to be considered stranded. 
+ * + * @param h HostInterface + * @return int + */ + int getStrandedGpus(HostInterface h); + + /** + * Return true if the host is preferring a particular show. + * + * @param h HostInterface + * @return boolean + */ + boolean isPreferShow(HostInterface h); + + /** + * Return true if the host is a NIMBY host. + * + * @param h HostInterface + * @return boolean + */ + boolean isNimbyHost(HostInterface h); + + /** + * Update the host's operating system setting. + * + * @param host HostInterface + * @param os String + */ + void updateHostOs(HostInterface host, String os); + + /** + * Update a host's resource pool using the latest host report. + * + * @param host HostInterface + * @param report HostReport + */ + void updateHostResources(HostInterface host, HostReport report); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java index 3fbfd2651..e99ee92fa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java @@ -37,444 +37,444 @@ public interface JobDao { - /** - * Updates all jobs in the speficed group to the max cores value. - * - * @param g - * @param cores - */ - public void updateMaxCores(GroupInterface g, int cores); - - /** - * Updates all jobs in the specifid group to the min cores value. - * - * @param g - * @param cores - */ - public void updateMinCores(GroupInterface g, int cores); - - /** - * Updates all jobs in the speficed group to the max gpu value. - * - * @param g - * @param gpu - */ - public void updateMaxGpus(GroupInterface g, int gpus); - - /** - * Updates all jobs in the specifid group to the min gpu value. - * - * @param g - * @param gpu - */ - public void updateMinGpus(GroupInterface g, int gpus); - - /** - * Updates all jobs in the specified group to the set priority. - * - * @param g - * @param priority - */ - public void updatePriority(GroupInterface g, int priority); - - /** - * Updates a jobs parent group to specified group - * - * @param job - * @param group - */ - void updateParent(JobInterface job, GroupDetail group, Inherit[] inherit); - - /** - * Returns an execution summary for the specified job. - * - * @param job - * @return - */ - ExecutionSummary getExecutionSummary(JobInterface job); - - /** - * returns a FrameStateTotals object with all of the job's frame state totals. - * - * @param job - * @return - */ - FrameStateTotals getFrameStateTotals(JobInterface job); - - /** - * Returns a DispatchJob from its unique id - * - * @param uuid - * @return - */ - DispatchJob getDispatchJob(String uuid); - - /** - * Returns true if the job has no more frames that can possibly be dispatched. - * - * @param job - * @return - */ - boolean isJobComplete(JobInterface job); - - /** - * Inserts a JobDetail. The job will not be pending until its activated. - * - * @param j - */ - void insertJob(JobDetail j, JobLogUtil jobLogUtil); - - /** - * Finds a Job from its name. This method returns only the current running job. - * - * @param name - * @return - */ - JobInterface findJob(String name); - - /** - * Finds a JobDetail from its name. This method returns only the current running job. 
- * - * @param name - * @return - */ - JobDetail findJobDetail(String name); - - /** - * Gets a JobDetail from its unique ID - * - * @param id - * @return - */ - JobDetail getJobDetail(String id); - - /** - * Returns a job by its ID - * - * @param id - * @return - */ - JobInterface getJob(String id); - - /** - * Returns a list of jobs assigned to a specific task. - * - * @param idl - * @return - */ - List getJobs(TaskEntity t); - - /** - * Finds all the jobs in a show. - * - * @param show - * @return - */ - List findJobs(ShowInterface show); - - /** - * - * @param group - * @return - */ - List findJobs(GroupInterface group); - - /** - * Returns true if an active job with the specified name exists - * - * @param name - * @return - */ - boolean exists(String name); - - /** - * Deletes specified job from DB - * - * @param job - */ - void deleteJob(JobInterface job); - - /** - * Activate job in lauching state. - * - * @param job - */ - void activateJob(JobInterface job, JobState jobState); - - /** - * updates the state of a job with new job state - * - * @param job - * @param state - */ - void updateState(JobInterface job, JobState state); - - /** - * updates a job to the finished state. returns true if the job was updated - * - * @param job - */ - boolean updateJobFinished(JobInterface job); - - /** - * reteurns true if job is over its minimum proc - * - * @param job - * @return boolean - */ - boolean isOverMinCores(JobInterface job); - - /** - * returns true if a job has pending frames. - * - * @param job - * @return - */ - boolean hasPendingFrames(JobInterface job); - - /** - * returns true if job is over max procs - * - * @param job - * @return - */ - boolean isOverMaxCores(JobInterface job); - - /** - * returns true if job is at its max proc - * - * @param job - * @return - */ - boolean isAtMaxCores(JobInterface job); - - /** - * Return true if adding given core units to the job will set the job over its max core value. - * - * @param job - * @param coreUnits - * @return - */ - boolean isOverMaxCores(JobInterface job, int coreUnits); - - /** - * returns true if job is over max gpus - * - * @param job - * @return - */ - boolean isOverMaxGpus(JobInterface job); - - /** - * returns true if job is at its max gpus - * - * @param job - * @return - */ - boolean isAtMaxGpus(JobInterface job); - - /** - * Return true if adding given gpus to the job will set the job over its max gpus value. - * - * @param job - * @param gpus - * @return - */ - boolean isOverMaxGpus(JobInterface job, int gpus); - - /** - * sets the jobs new priority value - * - * @param j - * @param v - */ - void updatePriority(JobInterface j, int v); - - /** - * sets the jobs new min proc value - * - * @param j - * @param v - */ - void updateMinCores(JobInterface j, int v); - - /** - * sets the jobs new max proc value - * - * @param j - * @param v - */ - void updateMaxCores(JobInterface j, int v); - - /** - * sets the jobs new min gpu value - * - * @param j - * @param v - */ - void updateMinGpus(JobInterface j, int v); - - /** - * sets the jobs new max gpu value - * - * @param j - * @param v - */ - void updateMaxGpus(JobInterface j, int v); - - /** - * Update a job's paused state - * - * @param j - * @param b - */ - void updatePaused(JobInterface j, boolean b); - - /** - * Update a jobs auto-eat state - * - * @param j - * @param b - */ - void updateAutoEat(JobInterface j, boolean b); - - /** - * Updates the int_max_retries column with the value of max_retries. 
Checks to make sure - * max_retries is greater than 0 and less than or equal to MAX_FRAME_RETRIES - * - * @param Job - * @param max_retries - */ - void updateMaxFrameRetries(JobInterface j, int max_retries); - - /** - * Inserts a map into the job's env table - * - * - * @param job - * @param env - */ - void insertEnvironment(JobInterface job, Map env); - - /** - * Update jobs max RSS. Only updates if the passed in value is greater than the current value of - * int_max_rss - * - * @param job - * @param env - */ - void updateMaxRSS(JobInterface job, long maxRss); - - /** - * Inserts a key/value pair into the jobs env table - * - * @param job - * @param key - * @param value - */ - void insertEnvironment(JobInterface job, String key, String value); - - /** - * Grabs the job environment - * - * @param job - * @return - */ - Map getEnvironment(JobInterface job); - - /** - * Updates the job's log path in the DB. This doesn't touch the file system. - * - * @param job - * @param path - */ - public void updateLogPath(JobInterface job, String path); - - /** - * - * @param name - * @return - */ - public JobDetail findLastJob(String name); - - /** - * Returns true of the cue has some pending jobs - * - * @return - */ - public boolean cueHasPendingJobs(FacilityInterface f); - - /** - * Enables/disables autobooking for specified job. - * - * @param value - */ - public void enableAutoBooking(JobInterface job, boolean value); - - /** - * Enables/disables auto unbooking for specified job. - * - * @param job - * @param value - */ - void enableAutoUnBooking(JobInterface job, boolean value); - - /** - * Maps the post job to the specified job - * - * @param job - */ - void mapPostJob(BuildableJob job); - - /** - * Activates the specified job's post job - * - * @param job - */ - void activatePostJob(JobInterface job); - - /** - * Update all jobs in the specified group to the specified department. - * - * @param group - */ - void updateDepartment(GroupInterface group, DepartmentInterface dept); - - /** - * Update the specified job to the specified department. - * - * @param group - */ - void updateDepartment(JobInterface job, DepartmentInterface dept); - - /** - * Set the job's new parent. The job will automatically inherit all relevant settings from the - * group. - * - * @param job - * @param dest - */ - void updateParent(JobInterface job, GroupDetail dest); - - /** - * Update layer usage with processor time usage. This happens when the proc has completed or - * failed some work. - * - * @param proc - * @param newState - */ - void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus); - - /** - * Returns true if the job is launching - * - * @param j - * @return - */ - boolean isLaunching(JobInterface j); - - void updateEmail(JobInterface job, String email); - - String getEmail(JobInterface job); + /** + * Updates all jobs in the speficed group to the max cores value. + * + * @param g + * @param cores + */ + public void updateMaxCores(GroupInterface g, int cores); + + /** + * Updates all jobs in the specifid group to the min cores value. + * + * @param g + * @param cores + */ + public void updateMinCores(GroupInterface g, int cores); + + /** + * Updates all jobs in the speficed group to the max gpu value. + * + * @param g + * @param gpu + */ + public void updateMaxGpus(GroupInterface g, int gpus); + + /** + * Updates all jobs in the specifid group to the min gpu value. 
+ * + * @param g + * @param gpu + */ + public void updateMinGpus(GroupInterface g, int gpus); + + /** + * Updates all jobs in the specified group to the set priority. + * + * @param g + * @param priority + */ + public void updatePriority(GroupInterface g, int priority); + + /** + * Updates a jobs parent group to specified group + * + * @param job + * @param group + */ + void updateParent(JobInterface job, GroupDetail group, Inherit[] inherit); + + /** + * Returns an execution summary for the specified job. + * + * @param job + * @return + */ + ExecutionSummary getExecutionSummary(JobInterface job); + + /** + * returns a FrameStateTotals object with all of the job's frame state totals. + * + * @param job + * @return + */ + FrameStateTotals getFrameStateTotals(JobInterface job); + + /** + * Returns a DispatchJob from its unique id + * + * @param uuid + * @return + */ + DispatchJob getDispatchJob(String uuid); + + /** + * Returns true if the job has no more frames that can possibly be dispatched. + * + * @param job + * @return + */ + boolean isJobComplete(JobInterface job); + + /** + * Inserts a JobDetail. The job will not be pending until its activated. + * + * @param j + */ + void insertJob(JobDetail j, JobLogUtil jobLogUtil); + + /** + * Finds a Job from its name. This method returns only the current running job. + * + * @param name + * @return + */ + JobInterface findJob(String name); + + /** + * Finds a JobDetail from its name. This method returns only the current running job. + * + * @param name + * @return + */ + JobDetail findJobDetail(String name); + + /** + * Gets a JobDetail from its unique ID + * + * @param id + * @return + */ + JobDetail getJobDetail(String id); + + /** + * Returns a job by its ID + * + * @param id + * @return + */ + JobInterface getJob(String id); + + /** + * Returns a list of jobs assigned to a specific task. + * + * @param idl + * @return + */ + List getJobs(TaskEntity t); + + /** + * Finds all the jobs in a show. + * + * @param show + * @return + */ + List findJobs(ShowInterface show); + + /** + * + * @param group + * @return + */ + List findJobs(GroupInterface group); + + /** + * Returns true if an active job with the specified name exists + * + * @param name + * @return + */ + boolean exists(String name); + + /** + * Deletes specified job from DB + * + * @param job + */ + void deleteJob(JobInterface job); + + /** + * Activate job in lauching state. + * + * @param job + */ + void activateJob(JobInterface job, JobState jobState); + + /** + * updates the state of a job with new job state + * + * @param job + * @param state + */ + void updateState(JobInterface job, JobState state); + + /** + * updates a job to the finished state. returns true if the job was updated + * + * @param job + */ + boolean updateJobFinished(JobInterface job); + + /** + * reteurns true if job is over its minimum proc + * + * @param job + * @return boolean + */ + boolean isOverMinCores(JobInterface job); + + /** + * returns true if a job has pending frames. + * + * @param job + * @return + */ + boolean hasPendingFrames(JobInterface job); + + /** + * returns true if job is over max procs + * + * @param job + * @return + */ + boolean isOverMaxCores(JobInterface job); + + /** + * returns true if job is at its max proc + * + * @param job + * @return + */ + boolean isAtMaxCores(JobInterface job); + + /** + * Return true if adding given core units to the job will set the job over its max core value. 
+ * + * @param job + * @param coreUnits + * @return + */ + boolean isOverMaxCores(JobInterface job, int coreUnits); + + /** + * returns true if job is over max gpus + * + * @param job + * @return + */ + boolean isOverMaxGpus(JobInterface job); + + /** + * returns true if job is at its max gpus + * + * @param job + * @return + */ + boolean isAtMaxGpus(JobInterface job); + + /** + * Return true if adding given gpus to the job will set the job over its max gpus value. + * + * @param job + * @param gpus + * @return + */ + boolean isOverMaxGpus(JobInterface job, int gpus); + + /** + * sets the jobs new priority value + * + * @param j + * @param v + */ + void updatePriority(JobInterface j, int v); + + /** + * sets the jobs new min proc value + * + * @param j + * @param v + */ + void updateMinCores(JobInterface j, int v); + + /** + * sets the jobs new max proc value + * + * @param j + * @param v + */ + void updateMaxCores(JobInterface j, int v); + + /** + * sets the jobs new min gpu value + * + * @param j + * @param v + */ + void updateMinGpus(JobInterface j, int v); + + /** + * sets the jobs new max gpu value + * + * @param j + * @param v + */ + void updateMaxGpus(JobInterface j, int v); + + /** + * Update a job's paused state + * + * @param j + * @param b + */ + void updatePaused(JobInterface j, boolean b); + + /** + * Update a jobs auto-eat state + * + * @param j + * @param b + */ + void updateAutoEat(JobInterface j, boolean b); + + /** + * Updates the int_max_retries column with the value of max_retries. Checks to make sure + * max_retries is greater than 0 and less than or equal to MAX_FRAME_RETRIES + * + * @param Job + * @param max_retries + */ + void updateMaxFrameRetries(JobInterface j, int max_retries); + + /** + * Inserts a map into the job's env table + * + * + * @param job + * @param env + */ + void insertEnvironment(JobInterface job, Map env); + + /** + * Update jobs max RSS. Only updates if the passed in value is greater than the current value of + * int_max_rss + * + * @param job + * @param env + */ + void updateMaxRSS(JobInterface job, long maxRss); + + /** + * Inserts a key/value pair into the jobs env table + * + * @param job + * @param key + * @param value + */ + void insertEnvironment(JobInterface job, String key, String value); + + /** + * Grabs the job environment + * + * @param job + * @return + */ + Map getEnvironment(JobInterface job); + + /** + * Updates the job's log path in the DB. This doesn't touch the file system. + * + * @param job + * @param path + */ + public void updateLogPath(JobInterface job, String path); + + /** + * + * @param name + * @return + */ + public JobDetail findLastJob(String name); + + /** + * Returns true of the cue has some pending jobs + * + * @return + */ + public boolean cueHasPendingJobs(FacilityInterface f); + + /** + * Enables/disables autobooking for specified job. + * + * @param value + */ + public void enableAutoBooking(JobInterface job, boolean value); + + /** + * Enables/disables auto unbooking for specified job. + * + * @param job + * @param value + */ + void enableAutoUnBooking(JobInterface job, boolean value); + + /** + * Maps the post job to the specified job + * + * @param job + */ + void mapPostJob(BuildableJob job); + + /** + * Activates the specified job's post job + * + * @param job + */ + void activatePostJob(JobInterface job); + + /** + * Update all jobs in the specified group to the specified department. 
+ * + * @param group + */ + void updateDepartment(GroupInterface group, DepartmentInterface dept); + + /** + * Update the specified job to the specified department. + * + * @param group + */ + void updateDepartment(JobInterface job, DepartmentInterface dept); + + /** + * Set the job's new parent. The job will automatically inherit all relevant settings from the + * group. + * + * @param job + * @param dest + */ + void updateParent(JobInterface job, GroupDetail dest); + + /** + * Update layer usage with processor time usage. This happens when the proc has completed or + * failed some work. + * + * @param proc + * @param newState + */ + void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus); + + /** + * Returns true if the job is launching + * + * @param j + * @return + */ + boolean isLaunching(JobInterface j); + + void updateEmail(JobInterface job, String email); + + String getEmail(JobInterface job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java index 06d83737f..5d5433ada 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java @@ -31,417 +31,418 @@ public interface LayerDao { - /** - * - * @param layer - * @return - */ - public ExecutionSummary getExecutionSummary(LayerInterface layer); - - /** - * return the frame state totals for the specified layer - * - * @param layer - * @return - */ - public FrameStateTotals getFrameStateTotals(LayerInterface layer); - - /** - * returns a list of layers by job - * - * @param job - * @return - */ - public List getLayerDetails(JobInterface job); - - /** - * Returns true if supplied layer is complete. - * - * @param layer - * @return boolean - */ - boolean isLayerComplete(LayerInterface layer); - - /** - * Returns true if supplied layer is dispatchable. - * - * @param l - * @return boolean - */ - boolean isLayerDispatchable(LayerInterface l); - - /** - * Inserts a LayerDetail - * - * @param l - */ - void insertLayerDetail(LayerDetail l); - - /** - * gets a layer detail from an object that implements layer - * - * @param layer - * @return LayerDetail - */ - LayerDetail getLayerDetail(LayerInterface layer); - - /** - * get layer detail from the the unique id - * - * @param id - * @return - */ - LayerDetail getLayerDetail(String id); - - /** - * get a layer detail from the job and layer name - * - * @param job - * @param name - * @return - */ - LayerDetail findLayerDetail(JobInterface job, String name); - - /** - * Get a minimal layer from the layer id - * - * @param id - * @return - */ - LayerInterface getLayer(String id); - - /** - * Find a minimal layer from the job and layer name - * - * @param job - * @param name - * @return - */ - LayerInterface findLayer(JobInterface job, String name); - - /** - * update the number of min cores the layer requires - * - * @param layer - * @param val - */ - void updateLayerMinCores(LayerInterface layer, int val); - - /** - * update the number of gpus the layer requires - * - * @param layer - * @param val - */ - void updateLayerMinGpus(LayerInterface layer, int val); - - /** - * update the amount of memory required by all subsequent running frames in the specified layer. - * - * @param layer - * @param val - */ - void updateLayerMinMemory(LayerInterface layer, long kb); - - /** - * update the amount of gpu memory in kb required by all subsequent running frames in the - * specified layer. 
- * - * @param layer - * @param val - */ - void updateLayerMinGpuMemory(LayerInterface layer, long val); - - /** - * Update a layer with new host tags. - * - * @param layer - * @param val - */ - void updateLayerTags(LayerInterface layer, Set tags); - - /** - * Insert a key/value pair into the layer environment - * - * @param layer - * @param key - * @param value - */ - void insertLayerEnvironment(LayerInterface layer, String key, String value); - - /** - * Insert a map key/value pairs into the layer environment - * - * @param layer - * @param env - */ - void insertLayerEnvironment(LayerInterface layer, Map env); - - /** - * Get the layer environment map - * - * @param layer - * @return - */ - Map getLayerEnvironment(LayerInterface layer); - - /** - * Updated the layers MaxRSS value. If force is true then the value is updated no matter what the - * current value is. If force is false, the value is only updated the val is greater than than the - * existing value. - * - * @param layer - * @param val - */ - void updateLayerMaxRSS(LayerInterface layer, long val, boolean force); - - /** - * Increases the value of the minimum memory when the supplied value is larger than the current - * value - * - * @param layer - * @param val - */ - void increaseLayerMinMemory(LayerInterface layer, long val); - - /** - * Increases the value of the minimum gpu when the supplied value is larger than the current value - * - * @param layer - * @param val - */ - void increaseLayerMinGpuMemory(LayerInterface layer, long val); - - /** - * Tries to find a max RSS value for layer in the specified job. The layer must have at least 25% - * of its pending frames completed for this to return a valid result. If the layer cannot be found - * then 0 is returned. - * - * @param job - * @param name - * @return - */ - long findPastMaxRSS(JobInterface job, String name); - - /** - * Returns a list of layers from the specified job. - * - * @param job - * @return - */ - public List getLayers(JobInterface job); - - /** - * Update all layers of the set type in specified job with the new tags. - * - * @param job - * @param tags - * @param type - */ - void updateTags(JobInterface job, String tags, LayerType type); - - /** - * Update all layers of the set type in the specified job with the new memory requirement. - * - * @param job - * @param mem - * @param type - */ - void updateMinMemory(JobInterface job, long mem, LayerType type); - - /** - * Update all layers of the set type in the specified job with the new gpu requirement. - * - * @param job - * @param mem - * @param type - */ - void updateMinGpuMemory(JobInterface job, long mem, LayerType type); - - /** - * Update all layers of the set type in the specified job with the new max cores requirement. - * - * @param job - * @param cores - * @param type - */ - void updateMaxCores(JobInterface job, int cores, LayerType type); - - /** - * Update all layers of the set type in the specified job with the new min cores requirement. - * - * @param job - * @param cores - * @param type - */ - void updateMinCores(JobInterface job, int cores, LayerType type); - - /** - * Update all layers of the set type in the specified job with the new min gpu requirement. - * - * @param job - * @param gpus - * @param type - */ - void updateMinGpus(JobInterface job, int gpus, LayerType type); - - /** - * Update a layer's max cores value, which limits how much threading can go on. 
- * - * @param layer - * @param threadable - */ - void updateThreadable(LayerInterface layer, boolean threadable); - - /** - * Update a layer's timeout value, which limits how much the frame can run on a host. - * - * @param layer - * @param timeout - */ - void updateTimeout(LayerInterface layer, int timeout); - - /** - * Update a layer's LLU timeout value, which limits how much the frame can run on a host without - * updates in the log file. - * - * @param layer - * @param timeout_llu - */ - void updateTimeoutLLU(LayerInterface layer, int timeout_llu); - - /** - * Lowers the minimum memory on a layer if the layer is using less memory and the currnet min - * memory is the dispatcher default. - * - * @param layer - * @param val - * @return - */ - boolean balanceLayerMinMemory(LayerInterface layer, long val); - - /** - * Appends a tag to the current set of tags. If the tag already exists then nothing happens. - * - * @param layer - * @param val - */ - void appendLayerTags(LayerInterface layer, String val); - - /** - * Returns true if the layer can be optimized to use util based on the specified criteria. - * - * @param l - * @param succeeded - * @param avg - * @return - */ - boolean isOptimizable(LayerInterface l, int succeeded, float avg); - - /** - * Update layer usage with processor time usage. This happens when the proc has completed or - * failed some work. - * - * @param layer - * @param newState - * @param exitStatus - */ - void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus); - - /** - * Returns true of the layer is launching. - * - * @param l - * @return - */ - boolean isLaunching(LayerInterface l); - - /** - * Return true if the application running in the given layer is threadable. - * - * @param l - * @return - */ - boolean isThreadable(LayerInterface l); - - /** - * Enable/disable memory optimizer. - * - * @param layer - * @param state - */ - void enableMemoryOptimizer(LayerInterface layer, boolean state); - - /** - * Return a list of outputs mapped to the given layer. - * - * @param layer - * @return - */ - List getLayerOutputs(LayerInterface layer); - - /** - * Add a list of filespecs to the given layer's output table. - * - * @param layer - * @param specs - */ - void insertLayerOutput(LayerInterface layer, String spec); - - /** - * Return the thread stats for the given layer. - * - * @param layer - * @return - */ - List getThreadStats(LayerInterface layer); - - /** - * Set the layer's max cores value to the given int. The max cores value will not allow the - * dispatcher to book over the given number of cores. - * - * @param layer - * @param val - */ - void updateLayerMaxCores(LayerInterface layer, int val); - - /** - * Set the layer's max gpus value to the given int. The max gpu value will not allow the - * dispatcher to book over the given number of gpu. - * - * @param layer - * @param val - */ - void updateLayerMaxGpus(LayerInterface layer, int val); - - /** - * Add a limit to the given layer. - * - * @param layer - * @param limit_id - */ - void addLimit(LayerInterface layer, String limitId); - - /** - * Remove a limit to the given layer. - * - * @param layer - * @param limit_id - */ - void dropLimit(LayerInterface layer, String limitId); - - /** - * Return a list of limits on the layer. - * - * @param layer - */ - List getLimits(LayerInterface layer); - - /** - * Return a list of limit names on the layer. 
- * - * @param layer - */ - List getLimitNames(LayerInterface layer); + /** + * + * @param layer + * @return + */ + public ExecutionSummary getExecutionSummary(LayerInterface layer); + + /** + * return the frame state totals for the specified layer + * + * @param layer + * @return + */ + public FrameStateTotals getFrameStateTotals(LayerInterface layer); + + /** + * returns a list of layers by job + * + * @param job + * @return + */ + public List getLayerDetails(JobInterface job); + + /** + * Returns true if supplied layer is complete. + * + * @param layer + * @return boolean + */ + boolean isLayerComplete(LayerInterface layer); + + /** + * Returns true if supplied layer is dispatchable. + * + * @param l + * @return boolean + */ + boolean isLayerDispatchable(LayerInterface l); + + /** + * Inserts a LayerDetail + * + * @param l + */ + void insertLayerDetail(LayerDetail l); + + /** + * gets a layer detail from an object that implements layer + * + * @param layer + * @return LayerDetail + */ + LayerDetail getLayerDetail(LayerInterface layer); + + /** + * get layer detail from the the unique id + * + * @param id + * @return + */ + LayerDetail getLayerDetail(String id); + + /** + * get a layer detail from the job and layer name + * + * @param job + * @param name + * @return + */ + LayerDetail findLayerDetail(JobInterface job, String name); + + /** + * Get a minimal layer from the layer id + * + * @param id + * @return + */ + LayerInterface getLayer(String id); + + /** + * Find a minimal layer from the job and layer name + * + * @param job + * @param name + * @return + */ + LayerInterface findLayer(JobInterface job, String name); + + /** + * update the number of min cores the layer requires + * + * @param layer + * @param val + */ + void updateLayerMinCores(LayerInterface layer, int val); + + /** + * update the number of gpus the layer requires + * + * @param layer + * @param val + */ + void updateLayerMinGpus(LayerInterface layer, int val); + + /** + * update the amount of memory required by all subsequent running frames in the specified layer. + * + * @param layer + * @param val + */ + void updateLayerMinMemory(LayerInterface layer, long kb); + + /** + * update the amount of gpu memory in kb required by all subsequent running frames in the + * specified layer. + * + * @param layer + * @param val + */ + void updateLayerMinGpuMemory(LayerInterface layer, long val); + + /** + * Update a layer with new host tags. + * + * @param layer + * @param val + */ + void updateLayerTags(LayerInterface layer, Set tags); + + /** + * Insert a key/value pair into the layer environment + * + * @param layer + * @param key + * @param value + */ + void insertLayerEnvironment(LayerInterface layer, String key, String value); + + /** + * Insert a map key/value pairs into the layer environment + * + * @param layer + * @param env + */ + void insertLayerEnvironment(LayerInterface layer, Map env); + + /** + * Get the layer environment map + * + * @param layer + * @return + */ + Map getLayerEnvironment(LayerInterface layer); + + /** + * Updated the layers MaxRSS value. If force is true then the value is updated no matter what + * the current value is. If force is false, the value is only updated the val is greater than + * than the existing value. 
+ * + * @param layer + * @param val + */ + void updateLayerMaxRSS(LayerInterface layer, long val, boolean force); + + /** + * Increases the value of the minimum memory when the supplied value is larger than the current + * value + * + * @param layer + * @param val + */ + void increaseLayerMinMemory(LayerInterface layer, long val); + + /** + * Increases the value of the minimum gpu when the supplied value is larger than the current + * value + * + * @param layer + * @param val + */ + void increaseLayerMinGpuMemory(LayerInterface layer, long val); + + /** + * Tries to find a max RSS value for layer in the specified job. The layer must have at least + * 25% of its pending frames completed for this to return a valid result. If the layer cannot be + * found then 0 is returned. + * + * @param job + * @param name + * @return + */ + long findPastMaxRSS(JobInterface job, String name); + + /** + * Returns a list of layers from the specified job. + * + * @param job + * @return + */ + public List getLayers(JobInterface job); + + /** + * Update all layers of the set type in specified job with the new tags. + * + * @param job + * @param tags + * @param type + */ + void updateTags(JobInterface job, String tags, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new memory requirement. + * + * @param job + * @param mem + * @param type + */ + void updateMinMemory(JobInterface job, long mem, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new gpu requirement. + * + * @param job + * @param mem + * @param type + */ + void updateMinGpuMemory(JobInterface job, long mem, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new max cores requirement. + * + * @param job + * @param cores + * @param type + */ + void updateMaxCores(JobInterface job, int cores, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new min cores requirement. + * + * @param job + * @param cores + * @param type + */ + void updateMinCores(JobInterface job, int cores, LayerType type); + + /** + * Update all layers of the set type in the specified job with the new min gpu requirement. + * + * @param job + * @param gpus + * @param type + */ + void updateMinGpus(JobInterface job, int gpus, LayerType type); + + /** + * Update a layer's max cores value, which limits how much threading can go on. + * + * @param layer + * @param threadable + */ + void updateThreadable(LayerInterface layer, boolean threadable); + + /** + * Update a layer's timeout value, which limits how much the frame can run on a host. + * + * @param layer + * @param timeout + */ + void updateTimeout(LayerInterface layer, int timeout); + + /** + * Update a layer's LLU timeout value, which limits how much the frame can run on a host without + * updates in the log file. + * + * @param layer + * @param timeout_llu + */ + void updateTimeoutLLU(LayerInterface layer, int timeout_llu); + + /** + * Lowers the minimum memory on a layer if the layer is using less memory and the currnet min + * memory is the dispatcher default. + * + * @param layer + * @param val + * @return + */ + boolean balanceLayerMinMemory(LayerInterface layer, long val); + + /** + * Appends a tag to the current set of tags. If the tag already exists then nothing happens. 
+ * + * @param layer + * @param val + */ + void appendLayerTags(LayerInterface layer, String val); + + /** + * Returns true if the layer can be optimized to use util based on the specified criteria. + * + * @param l + * @param succeeded + * @param avg + * @return + */ + boolean isOptimizable(LayerInterface l, int succeeded, float avg); + + /** + * Update layer usage with processor time usage. This happens when the proc has completed or + * failed some work. + * + * @param layer + * @param newState + * @param exitStatus + */ + void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus); + + /** + * Returns true of the layer is launching. + * + * @param l + * @return + */ + boolean isLaunching(LayerInterface l); + + /** + * Return true if the application running in the given layer is threadable. + * + * @param l + * @return + */ + boolean isThreadable(LayerInterface l); + + /** + * Enable/disable memory optimizer. + * + * @param layer + * @param state + */ + void enableMemoryOptimizer(LayerInterface layer, boolean state); + + /** + * Return a list of outputs mapped to the given layer. + * + * @param layer + * @return + */ + List getLayerOutputs(LayerInterface layer); + + /** + * Add a list of filespecs to the given layer's output table. + * + * @param layer + * @param specs + */ + void insertLayerOutput(LayerInterface layer, String spec); + + /** + * Return the thread stats for the given layer. + * + * @param layer + * @return + */ + List getThreadStats(LayerInterface layer); + + /** + * Set the layer's max cores value to the given int. The max cores value will not allow the + * dispatcher to book over the given number of cores. + * + * @param layer + * @param val + */ + void updateLayerMaxCores(LayerInterface layer, int val); + + /** + * Set the layer's max gpus value to the given int. The max gpu value will not allow the + * dispatcher to book over the given number of gpu. + * + * @param layer + * @param val + */ + void updateLayerMaxGpus(LayerInterface layer, int val); + + /** + * Add a limit to the given layer. + * + * @param layer + * @param limit_id + */ + void addLimit(LayerInterface layer, String limitId); + + /** + * Remove a limit to the given layer. + * + * @param layer + * @param limit_id + */ + void dropLimit(LayerInterface layer, String limitId); + + /** + * Return a list of limits on the layer. + * + * @param layer + */ + List getLimits(LayerInterface layer); + + /** + * Return a list of limit names on the layer. + * + * @param layer + */ + List getLimitNames(LayerInterface layer); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java index 686c35831..14e5054da 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/LimitDao.java @@ -20,53 +20,53 @@ public interface LimitDao { - /** - * Insert and return a facility. - * - * @param limit - * @return - */ - public String createLimit(String name, int maxValue); + /** + * Insert and return a facility. + * + * @param limit + * @return + */ + public String createLimit(String name, int maxValue); - /** - * Deletes a limit record, if possible. - * - * @param limit - * @return - */ - public void deleteLimit(LimitInterface limit); + /** + * Deletes a limit record, if possible. 
+ * + * @param limit + * @return + */ + public void deleteLimit(LimitInterface limit); - /** - * Find a limit by it's name - * - * @param name - * @return LimitEntity - */ - public LimitEntity findLimit(String name); + /** + * Find a limit by it's name + * + * @param name + * @return LimitEntity + */ + public LimitEntity findLimit(String name); - /** - * Gets a limit by Id - * - * @param id - * @return LimitEntity - */ - public LimitEntity getLimit(String id); + /** + * Gets a limit by Id + * + * @param id + * @return LimitEntity + */ + public LimitEntity getLimit(String id); - /** - * Set the specified limit's name. - * - * @param limit - * @param name - * @return - */ - public void setLimitName(LimitInterface limit, String name); + /** + * Set the specified limit's name. + * + * @param limit + * @param name + * @return + */ + public void setLimitName(LimitInterface limit, String name); - /** - * Set the specified limit's max value. - * - * @param limit - * @param value - * @return - */ - public void setMaxValue(LimitInterface limit, int value); + /** + * Set the specified limit's max value. + * + * @param limit + * @param value + * @return + */ + public void setMaxValue(LimitInterface limit, int value); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java index 6359ea936..3ae7c71b2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/MaintenanceDao.java @@ -24,37 +24,37 @@ */ public interface MaintenanceDao { - /** - * Set hosts to the down state that have not pinged in within 5 minutes and return the number - * hosts that failed the check. - * - * @return int - */ - int setUpHostsToDown(); - - /** - * Lock specified task - * - * @param task - * @return - */ - boolean lockTask(MaintenanceTask task); - - /** - * Locks a test for the specified number of minutes. No other thread will execute this task, even - * if the task is unlocked for N amount of time. - * - * @param task - * @param minutes - * @return - */ - public boolean lockTask(MaintenanceTask task, int minutes); - - /** - * Unlock specified task - * - * @param task - */ - void unlockTask(MaintenanceTask task); + /** + * Set hosts to the down state that have not pinged in within 5 minutes and return the number + * hosts that failed the check. + * + * @return int + */ + int setUpHostsToDown(); + + /** + * Lock specified task + * + * @param task + * @return + */ + boolean lockTask(MaintenanceTask task); + + /** + * Locks a test for the specified number of minutes. No other thread will execute this task, + * even if the task is unlocked for N amount of time. 
+ * + * @param task + * @param minutes + * @return + */ + public boolean lockTask(MaintenanceTask task, int minutes); + + /** + * Unlock specified task + * + * @param task + */ + void unlockTask(MaintenanceTask task); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java index 0f5dd5928..1f447a87d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/MatcherDao.java @@ -23,16 +23,16 @@ public interface MatcherDao { - void insertMatcher(MatcherEntity matcher); + void insertMatcher(MatcherEntity matcher); - void deleteMatcher(MatcherInterface matcher); + void deleteMatcher(MatcherInterface matcher); - void updateMatcher(MatcherEntity matcher); + void updateMatcher(MatcherEntity matcher); - MatcherEntity getMatcher(String id); + MatcherEntity getMatcher(String id); - MatcherEntity getMatcher(MatcherInterface matcher); + MatcherEntity getMatcher(MatcherInterface matcher); - List getMatchers(FilterInterface filter); + List getMatchers(FilterInterface filter); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java index 7f137d670..e39d2e48e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/NestedWhiteboardDao.java @@ -26,19 +26,19 @@ */ public interface NestedWhiteboardDao { - /** - * returns a grouped whiteboard for specified show. - * - * @param show - * @return - */ - NestedGroup getJobWhiteboard(ShowInterface show); + /** + * returns a grouped whiteboard for specified show. + * + * @param show + * @return + */ + NestedGroup getJobWhiteboard(ShowInterface show); - /** - * get a list of hosts - * - * @return List - */ - NestedHostSeq getHostWhiteboard(); + /** + * get a list of hosts + * + * @return List + */ + NestedHostSeq getHostWhiteboard(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java index f12c1820a..8d8e81608 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/OwnerDao.java @@ -22,55 +22,55 @@ public interface OwnerDao { - /** - * Return true if the given owner owns the particualar host. - * - * @param owner - * @param host - * @return - */ - boolean isOwner(OwnerEntity owner, HostInterface host); + /** + * Return true if the given owner owns the particualar host. + * + * @param owner + * @param host + * @return + */ + boolean isOwner(OwnerEntity owner, HostInterface host); - /** - * Get an owner record by ID. - * - * @param id - */ - OwnerEntity getOwner(String id); + /** + * Get an owner record by ID. + * + * @param id + */ + OwnerEntity getOwner(String id); - /** - * Return the owner of the given host. - * - * @param host - * @return - */ - OwnerEntity getOwner(HostInterface host); + /** + * Return the owner of the given host. + * + * @param host + * @return + */ + OwnerEntity getOwner(HostInterface host); - /** - * Return an owner record by name. - * - * @param name - */ - OwnerEntity findOwner(String name); + /** + * Return an owner record by name. + * + * @param name + */ + OwnerEntity findOwner(String name); - /** - * Delete the specified owner and all his/her deeds. Return true if the owner was actually - * deleted. False if not. 
- */ - boolean deleteOwner(Entity owner); + /** + * Delete the specified owner and all his/her deeds. Return true if the owner was actually + * deleted. False if not. + */ + boolean deleteOwner(Entity owner); - /** - * Insert a new owner record. - * - * @param owner - */ - void insertOwner(OwnerEntity owner, ShowInterface show); + /** + * Insert a new owner record. + * + * @param owner + */ + void insertOwner(OwnerEntity owner, ShowInterface show); - /** - * Set the owner's show. This can be null. - * - * @param owner - * @param show - */ - void updateShow(Entity owner, ShowInterface show); + /** + * Set the owner's show. This can be null. + * + * @param owner + * @param show + */ + void updateShow(Entity owner, ShowInterface show); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java index a1afdcf34..96a211231 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/PointDao.java @@ -25,102 +25,102 @@ public interface PointDao { - /** - * Inserts a render into the point table - * - * @param t - * @return - */ - void insertPointConf(PointDetail t); - - /** - * Inserts and returns an empty render point detail - * - * @param show - * @param dept - * @return - */ - PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept); - - /** - * Returns true if the department is being managed by track-it. - * - * @param show - * @param dept - * @return - */ - boolean isManaged(ShowInterface show, DepartmentInterface dept); - - /** - * Returns true if a render point config already exists for the specified show and department - * - * @param show - * @param dept - * @return - */ - boolean pointConfExists(ShowInterface show, DepartmentInterface dept); - - /** - * Updates the number of cores managed by this department - * - * @param cdept - * @param cores - */ - void updateManagedCores(PointInterface cdept, int cores); - - /** - * Enables TI managed. - * - * @param p - * @param task - * @param cores - */ - void updateEnableManaged(PointInterface cdept, String task, int cores); - - /** - * Disables TI mananaged. - * - * @param p - */ - void updateDisableManaged(PointInterface cdept); - - /** - * Returns a list of all managed point configs. - * - * @return - */ - List getManagedPointConfs(); - - /** - * Returns a DepartmentConfigDetail by unique ID - * - * @param id - * @return - */ - PointDetail getPointConfDetail(String id); - - /** - * Returns a DepartmentConfigDetail using the specified show and department - * - * - * @param show - * @param dept - * @return - */ - PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept); - - /** - * Updates the time at which the point config was last updated. - * - * @param t - */ - void updatePointConfUpdateTime(PointInterface t); - - /** - * - * @param job - * @return - */ - boolean isOverMinCores(JobInterface job); + /** + * Inserts a render into the point table + * + * @param t + * @return + */ + void insertPointConf(PointDetail t); + + /** + * Inserts and returns an empty render point detail + * + * @param show + * @param dept + * @return + */ + PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept); + + /** + * Returns true if the department is being managed by track-it. 
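// Sketch only: one plausible way a service could use the point-config methods in this
// interface, creating the per show/department record on demand. The PointConfigService
// class is hypothetical; pointConfExists and getPointConfigDetail are declared just below.
import com.imageworks.spcue.DepartmentInterface;
import com.imageworks.spcue.PointDetail;
import com.imageworks.spcue.ShowInterface;

public class PointConfigService {
  private final PointDao pointDao;

  public PointConfigService(PointDao pointDao) {
    this.pointDao = pointDao;
  }

  /** Return the point config for the show/department pair, inserting an empty one if needed. */
  public PointDetail getOrCreate(ShowInterface show, DepartmentInterface dept) {
    if (!pointDao.pointConfExists(show, dept)) {
      return pointDao.insertPointConf(show, dept);
    }
    return pointDao.getPointConfigDetail(show, dept);
  }
}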
+ * + * @param show + * @param dept + * @return + */ + boolean isManaged(ShowInterface show, DepartmentInterface dept); + + /** + * Returns true if a render point config already exists for the specified show and department + * + * @param show + * @param dept + * @return + */ + boolean pointConfExists(ShowInterface show, DepartmentInterface dept); + + /** + * Updates the number of cores managed by this department + * + * @param cdept + * @param cores + */ + void updateManagedCores(PointInterface cdept, int cores); + + /** + * Enables TI managed. + * + * @param p + * @param task + * @param cores + */ + void updateEnableManaged(PointInterface cdept, String task, int cores); + + /** + * Disables TI mananaged. + * + * @param p + */ + void updateDisableManaged(PointInterface cdept); + + /** + * Returns a list of all managed point configs. + * + * @return + */ + List getManagedPointConfs(); + + /** + * Returns a DepartmentConfigDetail by unique ID + * + * @param id + * @return + */ + PointDetail getPointConfDetail(String id); + + /** + * Returns a DepartmentConfigDetail using the specified show and department + * + * + * @param show + * @param dept + * @return + */ + PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept); + + /** + * Updates the time at which the point config was last updated. + * + * @param t + */ + void updatePointConfUpdateTime(PointInterface t); + + /** + * + * @param job + * @return + */ + boolean isOverMinCores(JobInterface job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java index 221b4f99b..be1fa3bb6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java @@ -34,270 +34,270 @@ */ public interface ProcDao { - /** - * Returns the amount of reserved memory a proc has - * - * @param proc - * @return - */ - - long getReservedMemory(ProcInterface proc); - - /** - * Returns the amount of reserved gpu memory a proc has - * - * @param proc - * @return - */ - - long getReservedGpuMemory(ProcInterface proc); - - /** - * Removes a little bit of reserved memory from every other running frame in order to give some to - * the target proc. - * - * @param targetProc - * @param targetMem - * @return - */ - boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem); - - /** - * Increase a proc's reserved memory. - * - * @param id - * @param value - * @return - */ - boolean increaseReservedMemory(ProcInterface p, long value); - - /** - * Set a proc's reserved memory. - * - * @param id - * @param value - * @return - */ - void updateReservedMemory(ProcInterface p, long value); - - /** - * verifies the mapping bewtween a proc id and a frame id - * - * @param procid - * @param frameid - * @return - */ - boolean verifyRunningProc(String procid, String frameid); - - /** - * Creates a new virtual proc - * - * @param proc - */ - void insertVirtualProc(VirtualProc proc); - - /** - * Deletes an existing virtual proc - * - * @param proc - */ - boolean deleteVirtualProc(VirtualProc proc); - - /** - * Clears a virtual proc assignement. This keeps the proc around but sets pk_frame to null. This - * would normally happen after a frame completes and before the proc is dispatched again. - * - * @param proc - */ - boolean clearVirtualProcAssignment(ProcInterface proc); - - /** - * Clear a proc assignment by frame id. Return true if an assignment was cleared. 
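// Sketch (hypothetical caller): how the reservation methods above could be combined when a
// running frame needs more memory. Only increaseReservedMemory and balanceUnderUtilizedProcs
// come from this interface; the wrapper class is illustrative.
import com.imageworks.spcue.ProcInterface;

public class MemoryReservation {
  private final ProcDao procDao;

  public MemoryReservation(ProcDao procDao) {
    this.procDao = procDao;
  }

  /** Try to grow a proc's reservation, rebalancing under-utilized procs as a fallback. */
  public boolean reserve(ProcInterface proc, long kb) {
    if (procDao.increaseReservedMemory(proc, kb)) {
      return true;
    }
    // Shave a little reserved memory from other running procs to free up the target amount.
    return procDao.balanceUnderUtilizedProcs(proc, kb);
  }
}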
- * - * @param frame - * @return - */ - boolean clearVirtualProcAssignment(FrameInterface frame); - - /** - * Updates an existing proc's assignment - * - * @param proc - */ - void updateVirtualProcAssignment(VirtualProc proc); - - /** - * Update a procs memory usage based on the given frame it should be running. - * - * @param proc - * @param usedKb - * @param maxKb - */ - void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, long vsize, long maxVsize, - long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children); - - /** - * get aq virual proc from its unique id - * - * @param id - * @return - */ - VirtualProc getVirtualProc(String id); - - /** - * get a virtual proc from the frame its assigned to - * - * @param frame - * @return - */ - VirtualProc findVirtualProc(FrameInterface frame); - - /** - * gets a list of virtual procs from a FrameLookupRequest - * - * @param job - * @param req - * @return - */ - List findVirtualProcs(FrameSearchInterface s); - - /** - * get the list of procs from the host. - * - * @param host - * @return - */ - List findVirtualProcs(HostInterface host); - - /** - * find all procs booked on a specified layer - * - * @param layer - * @return - */ - List findVirtualProcs(LayerInterface layer); - - /** - * find all procs booked on specified job - * - * @param job - * @return - */ - List findVirtualProcs(JobInterface job); - - /** - * - * @return - */ - List findOrphanedVirtualProcs(); - - /** - * - * @return - */ - List findOrphanedVirtualProcs(int limit); - - /** - * Returns procs with a host in a particular hardware state. - * - * @param state - * @return - */ - public List findVirtualProcs(HardwareState state); - - /** - * Returns a list if procs using a ProcSearchInterface object. - * - * @param r - A ProcSearchInterface object - * @return a list of virtual procs - */ - List findVirtualProcs(ProcSearchInterface r); - - /** - * Unbooks a list of virtual procs using a batch query - * - * @param procs - * @return - */ - void unbookVirtualProcs(List procs); - - /** - * Unbooks a single virtual proc - * - * @param procs - * @return - */ - void unbookProc(ProcInterface proc); - - /** - * Used to set the unbook flag on a proc to true or false. - * - * @param proc - * @param unbooked - */ - public boolean setUnbookState(ProcInterface proc, boolean unbooked); - - /** - * Updates the proc record with the name of its redirect target. - * - * @param p - * @param r - */ - public boolean setRedirectTarget(ProcInterface p, Redirect r); - - /** - * Returns the unique id of the proc's current show - * - * @param p - * @return - */ - public String getCurrentShowId(ProcInterface p); - - /** - * Returns the unique id of the procs current job - * - * @param p - * @return - */ - public String getCurrentJobId(ProcInterface p); - - /** - * Returns the unique id of the procs current layer - * - * @param p - * @return - */ - public String getCurrentLayerId(ProcInterface p); - - /** - * Returns the unique id of the procs current frame - * - * @param p - * @return - */ - public String getCurrentFrameId(ProcInterface p); - - /** - * Returns an array of booked virutal procs. - * - * @param r - * @return - */ - List findBookedVirtualProcs(ProcSearchInterface r); - - /** - * Return true if the proc is an orphan. - * - * @param proc - * @return - */ - boolean isOrphan(ProcInterface proc); - - /** - * Return a list of all procs that are booked as part of the given local job assignment. 
- * - * @param l - * @return - */ - List findVirtualProcs(LocalHostAssignment l); + /** + * Returns the amount of reserved memory a proc has + * + * @param proc + * @return + */ + + long getReservedMemory(ProcInterface proc); + + /** + * Returns the amount of reserved gpu memory a proc has + * + * @param proc + * @return + */ + + long getReservedGpuMemory(ProcInterface proc); + + /** + * Removes a little bit of reserved memory from every other running frame in order to give some + * to the target proc. + * + * @param targetProc + * @param targetMem + * @return + */ + boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem); + + /** + * Increase a proc's reserved memory. + * + * @param id + * @param value + * @return + */ + boolean increaseReservedMemory(ProcInterface p, long value); + + /** + * Set a proc's reserved memory. + * + * @param id + * @param value + * @return + */ + void updateReservedMemory(ProcInterface p, long value); + + /** + * verifies the mapping bewtween a proc id and a frame id + * + * @param procid + * @param frameid + * @return + */ + boolean verifyRunningProc(String procid, String frameid); + + /** + * Creates a new virtual proc + * + * @param proc + */ + void insertVirtualProc(VirtualProc proc); + + /** + * Deletes an existing virtual proc + * + * @param proc + */ + boolean deleteVirtualProc(VirtualProc proc); + + /** + * Clears a virtual proc assignement. This keeps the proc around but sets pk_frame to null. This + * would normally happen after a frame completes and before the proc is dispatched again. + * + * @param proc + */ + boolean clearVirtualProcAssignment(ProcInterface proc); + + /** + * Clear a proc assignment by frame id. Return true if an assignment was cleared. + * + * @param frame + * @return + */ + boolean clearVirtualProcAssignment(FrameInterface frame); + + /** + * Updates an existing proc's assignment + * + * @param proc + */ + void updateVirtualProcAssignment(VirtualProc proc); + + /** + * Update a procs memory usage based on the given frame it should be running. + * + * @param proc + * @param usedKb + * @param maxKb + */ + void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, long vsize, long maxVsize, + long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children); + + /** + * get aq virual proc from its unique id + * + * @param id + * @return + */ + VirtualProc getVirtualProc(String id); + + /** + * get a virtual proc from the frame its assigned to + * + * @param frame + * @return + */ + VirtualProc findVirtualProc(FrameInterface frame); + + /** + * gets a list of virtual procs from a FrameLookupRequest + * + * @param job + * @param req + * @return + */ + List findVirtualProcs(FrameSearchInterface s); + + /** + * get the list of procs from the host. + * + * @param host + * @return + */ + List findVirtualProcs(HostInterface host); + + /** + * find all procs booked on a specified layer + * + * @param layer + * @return + */ + List findVirtualProcs(LayerInterface layer); + + /** + * find all procs booked on specified job + * + * @param job + * @return + */ + List findVirtualProcs(JobInterface job); + + /** + * + * @return + */ + List findOrphanedVirtualProcs(); + + /** + * + * @return + */ + List findOrphanedVirtualProcs(int limit); + + /** + * Returns procs with a host in a particular hardware state. + * + * @param state + * @return + */ + public List findVirtualProcs(HardwareState state); + + /** + * Returns a list if procs using a ProcSearchInterface object. 
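// Sketch (hypothetical cleanup pass): sweep orphaned procs with the finders declared in this
// interface. Assumes the list returned by findOrphanedVirtualProcs holds VirtualProc
// instances and that VirtualProc implements ProcInterface; the sweep class itself is made up.
import java.util.List;
import com.imageworks.spcue.VirtualProc;

public class OrphanedProcSweep {
  private final ProcDao procDao;

  public OrphanedProcSweep(ProcDao procDao) {
    this.procDao = procDao;
  }

  /** Delete up to limit orphaned procs and return how many were removed. */
  public int sweep(int limit) {
    int removed = 0;
    List<VirtualProc> orphans = procDao.findOrphanedVirtualProcs(limit);
    for (VirtualProc proc : orphans) {
      // Re-check before deleting; the proc may have reported in since the query ran.
      if (procDao.isOrphan(proc) && procDao.deleteVirtualProc(proc)) {
        removed++;
      }
    }
    return removed;
  }
}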
+ * + * @param r - A ProcSearchInterface object + * @return a list of virtual procs + */ + List findVirtualProcs(ProcSearchInterface r); + + /** + * Unbooks a list of virtual procs using a batch query + * + * @param procs + * @return + */ + void unbookVirtualProcs(List procs); + + /** + * Unbooks a single virtual proc + * + * @param procs + * @return + */ + void unbookProc(ProcInterface proc); + + /** + * Used to set the unbook flag on a proc to true or false. + * + * @param proc + * @param unbooked + */ + public boolean setUnbookState(ProcInterface proc, boolean unbooked); + + /** + * Updates the proc record with the name of its redirect target. + * + * @param p + * @param r + */ + public boolean setRedirectTarget(ProcInterface p, Redirect r); + + /** + * Returns the unique id of the proc's current show + * + * @param p + * @return + */ + public String getCurrentShowId(ProcInterface p); + + /** + * Returns the unique id of the procs current job + * + * @param p + * @return + */ + public String getCurrentJobId(ProcInterface p); + + /** + * Returns the unique id of the procs current layer + * + * @param p + * @return + */ + public String getCurrentLayerId(ProcInterface p); + + /** + * Returns the unique id of the procs current frame + * + * @param p + * @return + */ + public String getCurrentFrameId(ProcInterface p); + + /** + * Returns an array of booked virutal procs. + * + * @param r + * @return + */ + List findBookedVirtualProcs(ProcSearchInterface r); + + /** + * Return true if the proc is an orphan. + * + * @param proc + * @return + */ + boolean isOrphan(ProcInterface proc); + + /** + * Return a list of all procs that are booked as part of the given local job assignment. + * + * @param l + * @return + */ + List findVirtualProcs(LocalHostAssignment l); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java index 9ab9dee31..3285801cc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/RedirectDao.java @@ -22,46 +22,46 @@ @Transactional(propagation = Propagation.MANDATORY) public interface RedirectDao { - /** - * Check for redirect existence. - * - * @param key Redirect key - * - * @return True if redirect exists - */ - boolean containsKey(String key); + /** + * Check for redirect existence. + * + * @param key Redirect key + * + * @return True if redirect exists + */ + boolean containsKey(String key); - /** - * Count redirects in a group. - * - * @param groupId the group to query - * - * @return count of redirects in group - */ - int countRedirectsWithGroup(String groupId); + /** + * Count redirects in a group. + * + * @param groupId the group to query + * + * @return count of redirects in group + */ + int countRedirectsWithGroup(String groupId); - /** - * Delete all expired redirects. - * - * @return number of redirects deleted - */ - int deleteExpired(); + /** + * Delete all expired redirects. + * + * @return number of redirects deleted + */ + int deleteExpired(); - /** - * Add redirect. - * - * @param key Redirect key - * - * @param r Redirect to add - */ - void put(String key, Redirect r); + /** + * Add redirect. + * + * @param key Redirect key + * + * @param r Redirect to add + */ + void put(String key, Redirect r); - /** - * Delete and return specified redirect. 
- * - * @param key Redirect key - * - * @return the redirect that was deleted or null - */ - Redirect remove(String key); + /** + * Delete and return specified redirect. + * + * @param key Redirect key + * + * @return the redirect that was deleted or null + */ + Redirect remove(String key); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java index 77ba25f21..d7ef8e05c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ServiceDao.java @@ -20,23 +20,23 @@ public interface ServiceDao { - void insert(ServiceEntity service); + void insert(ServiceEntity service); - void insert(ServiceOverrideEntity service); + void insert(ServiceOverrideEntity service); - ServiceEntity get(String identifier); + ServiceEntity get(String identifier); - void update(ServiceEntity service); + void update(ServiceEntity service); - void update(ServiceOverrideEntity service); + void update(ServiceOverrideEntity service); - void delete(ServiceOverrideEntity service); + void delete(ServiceOverrideEntity service); - void delete(ServiceEntity service); + void delete(ServiceEntity service); - ServiceOverrideEntity getOverride(String id); + ServiceOverrideEntity getOverride(String id); - ServiceOverrideEntity getOverride(String id, String show); + ServiceOverrideEntity getOverride(String id, String show); - boolean isOverridden(String service, String show); + boolean isOverridden(String service, String show); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java index 709bb5582..b33b893ca 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java @@ -24,124 +24,124 @@ */ public interface ShowDao { - /** - * find show detail by name - * - * @param name - * @return ShowDetail - */ - ShowEntity findShowDetail(String name); - - /** - * get show detail from its unique id - * - * @param id - * @return ShowDetail - */ - ShowEntity getShowDetail(String id); - - /** - * Get show detail from its preferred show. - * - * @param id - * @return ShowDetail - */ - ShowEntity getShowDetail(HostInterface host); - - /** - * create a show from ShowDetail - * - * @param show - */ - void insertShow(ShowEntity show); - - /** - * return true if show exists, false if not - * - * @param name - * @return boolean - */ - boolean showExists(String name); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMinCores(ShowInterface s, int val); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMaxCores(ShowInterface s, int val); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMinGpus(ShowInterface s, int val); - - /** - * - * @param s - * @param val - */ - void updateShowDefaultMaxGpus(ShowInterface s, int val); - - /** - * Disabling this would stop new proc assignement. The show would get no new procs, but any procs - * already assigned to a job would continue to dispatch. - * - * @param s - * @param enabled - */ - void updateBookingEnabled(ShowInterface s, boolean enabled); - - /** - * Disabling dispatching would unbook each proc after it had completed a frame. - * - * @param s - * @param enabled - */ - void updateDispatchingEnabled(ShowInterface s, boolean enabled); - - /** - * Deletes a show if no data has been added to it. 
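// Sketch of a plausible lookup order built on the ServiceDao methods above: prefer a per-show
// override when one exists, otherwise fall back to the cluster-wide definition. Assumes
// ServiceOverrideEntity extends ServiceEntity; the resolver class is hypothetical.
import com.imageworks.spcue.ServiceEntity;

public class ServiceResolver {
  private final ServiceDao serviceDao;

  public ServiceResolver(ServiceDao serviceDao) {
    this.serviceDao = serviceDao;
  }

  public ServiceEntity resolve(String serviceName, String show) {
    if (serviceDao.isOverridden(serviceName, show)) {
      return serviceDao.getOverride(serviceName, show);
    }
    return serviceDao.get(serviceName);
  }
}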
- * - * @param s - */ - void delete(ShowInterface s); - - /** - * Updates the show frame counter. This counts all failed succceeded frames, forver. - * - * @param s - * @param exitStatus - */ - void updateFrameCounters(ShowInterface s, int exitStatus); - - /** - * Set the enabled status of a show to true/false. - * - * @param s - * @param enabled - */ - void updateActive(ShowInterface s, boolean enabled); - - /** - * An array of email addresses for which all job comments are echoed to. - * - * @param s - * @param emails - */ - void updateShowCommentEmail(ShowInterface s, String[] emails); - - /** - * Scheduled task to update shows. Set show as inactive if it has at least 1 job in job_history - * service th - */ - void updateShowsStatus(); + /** + * find show detail by name + * + * @param name + * @return ShowDetail + */ + ShowEntity findShowDetail(String name); + + /** + * get show detail from its unique id + * + * @param id + * @return ShowDetail + */ + ShowEntity getShowDetail(String id); + + /** + * Get show detail from its preferred show. + * + * @param id + * @return ShowDetail + */ + ShowEntity getShowDetail(HostInterface host); + + /** + * create a show from ShowDetail + * + * @param show + */ + void insertShow(ShowEntity show); + + /** + * return true if show exists, false if not + * + * @param name + * @return boolean + */ + boolean showExists(String name); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMinCores(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMaxCores(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMinGpus(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMaxGpus(ShowInterface s, int val); + + /** + * Disabling this would stop new proc assignement. The show would get no new procs, but any + * procs already assigned to a job would continue to dispatch. + * + * @param s + * @param enabled + */ + void updateBookingEnabled(ShowInterface s, boolean enabled); + + /** + * Disabling dispatching would unbook each proc after it had completed a frame. + * + * @param s + * @param enabled + */ + void updateDispatchingEnabled(ShowInterface s, boolean enabled); + + /** + * Deletes a show if no data has been added to it. + * + * @param s + */ + void delete(ShowInterface s); + + /** + * Updates the show frame counter. This counts all failed succceeded frames, forver. + * + * @param s + * @param exitStatus + */ + void updateFrameCounters(ShowInterface s, int exitStatus); + + /** + * Set the enabled status of a show to true/false. + * + * @param s + * @param enabled + */ + void updateActive(ShowInterface s, boolean enabled); + + /** + * An array of email addresses for which all job comments are echoed to. + * + * @param s + * @param emails + */ + void updateShowCommentEmail(ShowInterface s, String[] emails); + + /** + * Scheduled task to update shows. 
Set show as inactive if it has at least 1 job in job_history + * service th + */ + void updateShowsStatus(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java index ffc083d7d..70aa8c3ce 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/SubscriptionDao.java @@ -23,95 +23,95 @@ public interface SubscriptionDao { - /** - * returns true if the subscription has running procs - * - * @param sub SubscriptionInterface - * @return boolean - */ - boolean hasRunningProcs(SubscriptionInterface sub); + /** + * returns true if the subscription has running procs + * + * @param sub SubscriptionInterface + * @return boolean + */ + boolean hasRunningProcs(SubscriptionInterface sub); - /** - * Return true if the given show is at or over its size value for the given allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @return boolean - */ - boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc); + /** + * Return true if the given show is at or over its size value for the given allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @return boolean + */ + boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc); - /** - * Return true if the given show is over its size value for the given allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @return boolean - */ - boolean isShowOverSize(ShowInterface show, AllocationInterface alloc); + /** + * Return true if the given show is over its size value for the given allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @return boolean + */ + boolean isShowOverSize(ShowInterface show, AllocationInterface alloc); - /** - * Return true if adding the given coreUnits would put the show over its burst value for the given - * allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @param coreUnits int - * @return boolean - */ - boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); + /** + * Return true if adding the given coreUnits would put the show over its burst value for the + * given allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @param coreUnits int + * @return boolean + */ + boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); - /** - * Return true if the given show is at or over its burst value for the given allocation. - * - * @param show ShowInterface - * @param alloc AllocationInterface - * @return boolean - */ - boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); + /** + * Return true if the given show is at or over its burst value for the given allocation. + * + * @param show ShowInterface + * @param alloc AllocationInterface + * @return boolean + */ + boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); - /** - * Return true if the show that is utilizing the given proc has exceeded its burst. - * - * @param proc VirtualProc - * @return boolean - */ - boolean isShowOverSize(VirtualProc proc); + /** + * Return true if the show that is utilizing the given proc has exceeded its burst. 
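// Sketch (hypothetical booking guard): how a dispatcher-side check might use the burst
// queries above before granting more cores to a show on an allocation. Only the two
// SubscriptionDao calls come from this interface.
import com.imageworks.spcue.AllocationInterface;
import com.imageworks.spcue.ShowInterface;

public class BurstGuard {
  private final SubscriptionDao subscriptionDao;

  public BurstGuard(SubscriptionDao subscriptionDao) {
    this.subscriptionDao = subscriptionDao;
  }

  /** Return true if booking coreUnits more cores keeps the show within its burst. */
  public boolean canBook(ShowInterface show, AllocationInterface alloc, int coreUnits) {
    return !subscriptionDao.isShowAtOrOverBurst(show, alloc)
        && !subscriptionDao.isShowOverBurst(show, alloc, coreUnits);
  }
}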
+ * + * @param proc VirtualProc + * @return boolean + */ + boolean isShowOverSize(VirtualProc proc); - /** - * Return a SubscriptionDetail from its unique id - * - * @param id String - * @return SubscriptionEntity - */ - SubscriptionEntity getSubscriptionDetail(String id); + /** + * Return a SubscriptionDetail from its unique id + * + * @param id String + * @return SubscriptionEntity + */ + SubscriptionEntity getSubscriptionDetail(String id); - /** - * Insert a new subscription - * - * @param detail SubscriptionEntity - */ - void insertSubscription(SubscriptionEntity detail); + /** + * Insert a new subscription + * + * @param detail SubscriptionEntity + */ + void insertSubscription(SubscriptionEntity detail); - /** - * Delete specified subscription - * - * @param sub SubscriptionInterface - */ - void deleteSubscription(SubscriptionInterface sub); + /** + * Delete specified subscription + * + * @param sub SubscriptionInterface + */ + void deleteSubscription(SubscriptionInterface sub); - /** - * update the size of a subscription - * - * @param sub SubscriptionInterface - * @param size int - */ - void updateSubscriptionSize(SubscriptionInterface sub, int size); + /** + * update the size of a subscription + * + * @param sub SubscriptionInterface + * @param size int + */ + void updateSubscriptionSize(SubscriptionInterface sub, int size); - /** - * update the subscription burst - * - * @param sub SubscriptionInterface - * @param size int - */ - void updateSubscriptionBurst(SubscriptionInterface sub, int size); + /** + * update the subscription burst + * + * @param sub SubscriptionInterface + * @param size int + */ + void updateSubscriptionBurst(SubscriptionInterface sub, int size); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java index d7a3d5e00..b39b82207 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/TaskDao.java @@ -24,102 +24,102 @@ public interface TaskDao { - /** - * Delete all tasks for the specified dept config - * - * @param d - */ - void deleteTasks(PointInterface cdept); - - /** - * Delete all tasks for the specified show and dept - * - * @param d - */ - void deleteTasks(ShowInterface show, DepartmentInterface dept); - - /** - * Inserts a new task. A task is a shot based department priority. - * - * @param task - */ - void insertTask(TaskEntity task); - - /** - * Remove specified task. - * - * @param task - */ - void deleteTask(TaskInterface task); - - /** - * Returns a task from its unique id - * - * @param id - */ - TaskEntity getTaskDetail(String id); - - /** - * Returns a job's task representation - * - * @param j - * @return - */ - TaskEntity getTaskDetail(JobInterface j); - - /** - * Updates the specified tasks min procs - * - * @param t - * @param value - */ - void updateTaskMinCores(TaskInterface t, int value); - - /** - * Inserts a task if if does not exist, otherwise its updated. - * - * @param t - */ - void mergeTask(TaskEntity t); - - /** - * Returns true if the task is ti-managed. - */ - boolean isManaged(TaskInterface t); - - /** - * Adjusts the specified task's min cores to value. Only use adjust when the task is managed. 
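// Sketch only: the Javadoc above restricts adjustTaskMinCores to managed tasks, so a
// hypothetical caller might branch on isManaged before choosing between the two setters.
import com.imageworks.spcue.TaskInterface;

public class TaskMinCoresUpdater {
  private final TaskDao taskDao;

  public TaskMinCoresUpdater(TaskDao taskDao) {
    this.taskDao = taskDao;
  }

  public void setMinCores(TaskInterface task, int cores) {
    if (taskDao.isManaged(task)) {
      taskDao.adjustTaskMinCores(task, cores);
    } else {
      taskDao.updateTaskMinCores(task, cores);
    }
  }
}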
- * - * @param t - * @param value - */ - void adjustTaskMinCores(TaskInterface t, int value); - - /** - * - * @param cdept - */ - void clearTaskAdjustments(PointInterface cdept); - - /** - * - * @param t - */ - void clearTaskAdjustment(TaskInterface t); - - /** - * Returns a TaskDetail from a department id and shot name. - * - * @param d - * @param shot - * @return - */ - TaskEntity getTaskDetail(DepartmentInterface d, String shot); - - /** - * Returns true if the specified job is being managed by a task. - * - * @param Job - */ - boolean isManaged(JobInterface j); + /** + * Delete all tasks for the specified dept config + * + * @param d + */ + void deleteTasks(PointInterface cdept); + + /** + * Delete all tasks for the specified show and dept + * + * @param d + */ + void deleteTasks(ShowInterface show, DepartmentInterface dept); + + /** + * Inserts a new task. A task is a shot based department priority. + * + * @param task + */ + void insertTask(TaskEntity task); + + /** + * Remove specified task. + * + * @param task + */ + void deleteTask(TaskInterface task); + + /** + * Returns a task from its unique id + * + * @param id + */ + TaskEntity getTaskDetail(String id); + + /** + * Returns a job's task representation + * + * @param j + * @return + */ + TaskEntity getTaskDetail(JobInterface j); + + /** + * Updates the specified tasks min procs + * + * @param t + * @param value + */ + void updateTaskMinCores(TaskInterface t, int value); + + /** + * Inserts a task if if does not exist, otherwise its updated. + * + * @param t + */ + void mergeTask(TaskEntity t); + + /** + * Returns true if the task is ti-managed. + */ + boolean isManaged(TaskInterface t); + + /** + * Adjusts the specified task's min cores to value. Only use adjust when the task is managed. + * + * @param t + * @param value + */ + void adjustTaskMinCores(TaskInterface t, int value); + + /** + * + * @param cdept + */ + void clearTaskAdjustments(PointInterface cdept); + + /** + * + * @param t + */ + void clearTaskAdjustment(TaskInterface t); + + /** + * Returns a TaskDetail from a department id and shot name. + * + * @param d + * @param shot + * @return + */ + TaskEntity getTaskDetail(DepartmentInterface d, String shot); + + /** + * Returns true if the specified job is being managed by a task. + * + * @param Job + */ + boolean isManaged(JobInterface j); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java index 545b87be5..94a175977 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/TrackitDao.java @@ -21,14 +21,14 @@ public interface TrackitDao { - /** - * Downloads a list of all tasks for the specified department and inserts them into the Task - * table. - * - * @param show - * @param department - * @return - */ - List getTasks(String show, String dept); + /** + * Downloads a list of all tasks for the specified department and inserts them into the Task + * table. 
+ * + * @param show + * @param department + * @return + */ + List getTasks(String show, String dept); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java index 8b1f83ec2..aacdd0af1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/WhiteboardDao.java @@ -87,610 +87,610 @@ */ public interface WhiteboardDao { - /** - * Returns a list of hosts - * - * @param h HostInterface - * @return ProcSeq - */ - ProcSeq getProcs(HostInterface h); - - /** - * Returns a list of hosts - * - * @param r HostSearchInterface - * @return HostSeq - */ - HostSeq getHosts(HostSearchInterface r); - - /** - * Returns a list of jobs - * - * @param r JobSearchInterface - * @return JobSeq - */ - JobSeq getJobs(JobSearchInterface r); - - /** - * Returns a list of job names - * - * @param r JobSearchInterface - * @return List of Strings - */ - - List getJobNames(JobSearchInterface r); - - /** - * Returns the comments for the specified job - * - * @param j JobInterface - * @return CommentSeq - */ - CommentSeq getComments(JobInterface j); - - /** - * Returns the comments for the specified host - * - * @param h HostInterface - * @return CommentSeq - */ - CommentSeq getComments(HostInterface h); - - /** - * returns the host a proc is part of - * - * @param id String - * @return Host - */ - Host getHost(String id); - - /** - * returns the host by name - * - * @param name String - * @return Host - */ - Host findHost(String name); - - /** - * Return a dependency by its unique id - * - * @param id String - * @return Depend - */ - Depend getDepend(String id); - - /** - * Returns a list of all dependencies this job is involved with. - * - * @param job JobInterface - * @return DependSeq - */ - DependSeq getDepends(JobInterface job); - - /** - * Returns an array of depends that depend on the specified job. - * - * @param job JobInterface - * @return DependSeq - */ - DependSeq getWhatDependsOnThis(JobInterface job); - - /** - * Returns an array of depends that depend on the specified layer. - * - * @param layer LayerInterface - * @return DependSeq - */ - DependSeq getWhatDependsOnThis(LayerInterface layer); - - /** - * Returns an array of depends that depend on the specified job. - * - * @param frame FrameInterface - * @return DependSeq - */ - DependSeq getWhatDependsOnThis(FrameInterface frame); - - /** - * Returns an array of depends that the specified job is waiting on. - * - * @param job JobInterface - * @return DependSeq - */ - DependSeq getWhatThisDependsOn(JobInterface job); - - /** - * Returns an array of depends that the specified layer is waiting on. - * - * @param layer LayerInterface - * @return DependSeq - */ - DependSeq getWhatThisDependsOn(LayerInterface layer); - - /** - * Returns an array of depends that the specified frame is waiting on. 
- * - * @param frame FrameInterface - * @return DependSeq - */ - DependSeq getWhatThisDependsOn(FrameInterface frame); - - /** - * Returns the specified dependency - * - * @param depend DependInterface - * @return Depend - */ - Depend getDepend(DependInterface depend); - - Filter findFilter(String show, String name); - - Filter findFilter(ShowInterface show, String name); - - Filter getFilter(FilterInterface filter); - - MatcherSeq getMatchers(FilterInterface filter); - - Matcher getMatcher(MatcherInterface matcher); - - ActionSeq getActions(FilterInterface filter); - - Action getAction(ActionInterface action); - - /** - * Returns the frame by unique ID - * - * @param id String - * @return Frame - */ - Frame getFrame(String id); - - /** - * Returns a list of filters by show - * - * @param show ShowInterface - * @return FilterSeq - */ - - FilterSeq getFilters(ShowInterface show); - - /** - * Frame search - * - * @param r FrameSearchInterface - * @return FrameSeq - */ - FrameSeq getFrames(FrameSearchInterface r); - - /** - * Returns a list of layers for the specified job. - * - * @param job JobInterface - * @return LayerSeq - */ - LayerSeq getLayers(JobInterface job); - - /** - * Returns a layer from its unique ID - * - * @param id String - * @return Layer - */ - Layer getLayer(String id); - - /** - * Returns a list of limits for the specified layer. - * - * @param id String - * @return Layer - */ - List getLimits(LayerInterface layer); - - /** - * - * @param group GroupInterface - * @return JobSeq - */ - JobSeq getJobs(GroupInterface group); - - /** - * Finds an active job record based on the name - * - * @param name String - * @return Job - */ - Job findJob(String name); - - /** - * Gets an active job based on the Id - * - * @param id String - * @return Job - */ - Job getJob(String id); - - /** - * returns a subscription by its id - * - * @return Subscription - */ - Subscription getSubscription(String id); - - /** - * Find subscription using the show, facility, and alloc name. - * - * @param show String - * @param alloc String - * @return Subscription - */ - Subscription findSubscription(String show, String alloc); - - /** - * returns a list of subscriptions - * - * @param show ShowInterface - * @return SubscriptionSeq - */ - SubscriptionSeq getSubscriptions(ShowInterface show); - - /** - * returns all subscriptions on the specified allocation - * - * @param alloc AllocationInterface - * @return SubscriptionSeq - */ - SubscriptionSeq getSubscriptions(AllocationInterface alloc); - - /** - * returns a show by Id. - * - * @param id String - * @return Show - */ - Show getShow(String id); - - /** - * returns a show by its name. - * - * @param name String - * @return Show - */ - Show findShow(String name); - - /** - * - * return a list of shows from a whiteboard request - * - * @return ShowSeq - */ - ShowSeq getShows(); - - /** - * returns an allocation by Id. - * - * @param id String - * @return Allocation - */ - Allocation getAllocation(String id); - - /** - * returns a show by its name. 
- * - * @param name String - * @return Allocation - */ - Allocation findAllocation(String name); - - /** - * - * return the current list of allocations - * - * @return List of Allocations - */ - AllocationSeq getAllocations(); - - /** - * - * return the current list of allocations - * - * @param facility FacilityInterface - * @return List of Allocations - */ - AllocationSeq getAllocations(FacilityInterface facility); - - /** - * - * @param show ShowInterface - * @return Group - */ - Group getRootGroup(ShowInterface show); - - /** - * - * @param id String - * @return Group - */ - Group getGroup(String id); - - /** - * Finds a group by show name and group name - * - * @param show String - * @param group String - * @return Group - */ - Group findGroup(String show, String group); - - /** - * - * - * @param show ShowInterface - * @return GroupSeq - */ - GroupSeq getGroups(ShowInterface show); - - /** - * - * @param group GroupInterface - * @return GroupSeq - */ - GroupSeq getGroups(GroupInterface group); - - /** - * - * @param job String - * @param layer String - * @return Layer - */ - Layer findLayer(String job, String layer); - - /** - * - * @param job String - * @param layer String - * @param frame int - * @return Frame - */ - Frame findFrame(String job, String layer, int frame); - - /** - * returns an UpdatedFrameCheckResult which contains an array of updated frames. - * - * @param job JobInterface - * @param layers List of LayerInterfaces - * @param lastUpdate int - * @return UpdatedFrameCheckResult - */ - UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, - int lastUpdate); - - /** - * - * @param show ShowInterface - * @return DepartmentSeq - */ - DepartmentSeq getDepartments(ShowInterface show); - - /** - * - * @param show ShowInterface - * @param name String - * @return Department - */ - Department getDepartment(ShowInterface show, String name); - - /** - * Returns a list of available department names - * - * @return List of Strings - */ - List getDepartmentNames(); - - /** - * - * @param show ShowInterface - * @param dept DepartmentInterface - * @param shot String - * @return Task - */ - Task getTask(ShowInterface show, DepartmentInterface dept, String shot); - - /** - * - * @param show ShowInterface - * @param dept DepartmentInterface - * @return List of Tasks - */ - TaskSeq getTasks(ShowInterface show, DepartmentInterface dept); - - /** - * Returns procs from a ProcSearchInterface criteria. - * - * @param p ProcSearchInterface - * @return ProcSeq - */ - ProcSeq getProcs(ProcSearchInterface p); - - /** - * Return the grpc representation of the given AbstractDepend. - * - * @param depend AbstractDepend - * @return Depend - */ - Depend getDepend(AbstractDepend depend); - - /** - * Return the Host record for the given Deed. - * - * @param deed DeedEntity - * @return Host - */ - Host getHost(DeedEntity deed); - - /** - * Return the Owner of the given Deed. - * - * @param deed DeedEntity - * @return Owner - */ - Owner getOwner(DeedEntity deed); - - /** - * Return a list of all Deeds controlled by the given Owner. - * - * @param owner OwnerEntity - * @return DeedSeq - */ - DeedSeq getDeeds(OwnerEntity owner); - - /** - * Return a list of all Hosts controlled by the given Owner. - * - * @param owner OwnerEntity - * @return HostSeq - */ - HostSeq getHosts(OwnerEntity owner); - - /** - * Return the Owner of the given host. - * - * @param host HostInterface - * @return Owner - */ - Owner getOwner(HostInterface host); - - /** - * Return the Deed for the given Host. 
- * - * @param host HostInterface - * @return Deed - */ - Deed getDeed(HostInterface host); - - /** - * Return the owner by name. - * - * @param name String - * @return Owner - */ - Owner getOwner(String name); - - /** - * Return a list of owners by show. - * - * @param show ShowInterface - * @return List of Owners - */ - List getOwners(ShowInterface show); - - /** - * Return a list of Deeds by show. - * - * @param show ShowInterface - * @return DeedSeq - */ - DeedSeq getDeeds(ShowInterface show); - - /** - * Return a RenderPartion from its associated LocalHostAssignment. - * - * @param l LocalHostAssignment - * @return RenderPartition - */ - RenderPartition getRenderPartition(LocalHostAssignment l); - - /** - * Return a list or RenderPartition for the given Host. - * - * @param host HostInterface - * @return RenderPartitionSeq - */ - RenderPartitionSeq getRenderPartitions(HostInterface host); - - /** - * Return a facility by name or id. - * - * @param name String - * @return Facility - */ - Facility getFacility(String name); - - /** - * Return the full list of facilities. - * - * @return List of Facilities - */ - FacilitySeq getFacilities(); - - /** - * Return a list of all active shows. - * - * @return ShowSeq - */ - ShowSeq getActiveShows(); - - /** - * Return the given service. - * - * @param id String - * @return Service - */ - Service getService(String id); - - /** - * Return the list of cluster wide service defaults. - * - * @return ServiceSeq - */ - ServiceSeq getDefaultServices(); - - /** - * Return the list of service overrides for a particular show. - * - * @param show ShowInterface - * @return List of ServiceOverrides - */ - ServiceOverrideSeq getServiceOverrides(ShowInterface show); - - /** - * Return the given show override. - * - * @param show ShowInterface - * @param name String - * @return ServiceOverride - */ - ServiceOverride getServiceOverride(ShowInterface show, String name); - - /** - * Find a service by name. - * - * @param name String - * @return Service - */ - Service findService(String name); - - /** - * Find a limit by name. - * - * @param name String - * @return Service - */ - Limit findLimit(String name); - - /** - * Return a service by ID. - * - * @param id String - * @return Limit - */ - Limit getLimit(String id); - - /** - * Returns a list of all limits. 
- * - * @param id String - * @return Layer - */ - List getLimits(); + /** + * Returns a list of hosts + * + * @param h HostInterface + * @return ProcSeq + */ + ProcSeq getProcs(HostInterface h); + + /** + * Returns a list of hosts + * + * @param r HostSearchInterface + * @return HostSeq + */ + HostSeq getHosts(HostSearchInterface r); + + /** + * Returns a list of jobs + * + * @param r JobSearchInterface + * @return JobSeq + */ + JobSeq getJobs(JobSearchInterface r); + + /** + * Returns a list of job names + * + * @param r JobSearchInterface + * @return List of Strings + */ + + List getJobNames(JobSearchInterface r); + + /** + * Returns the comments for the specified job + * + * @param j JobInterface + * @return CommentSeq + */ + CommentSeq getComments(JobInterface j); + + /** + * Returns the comments for the specified host + * + * @param h HostInterface + * @return CommentSeq + */ + CommentSeq getComments(HostInterface h); + + /** + * returns the host a proc is part of + * + * @param id String + * @return Host + */ + Host getHost(String id); + + /** + * returns the host by name + * + * @param name String + * @return Host + */ + Host findHost(String name); + + /** + * Return a dependency by its unique id + * + * @param id String + * @return Depend + */ + Depend getDepend(String id); + + /** + * Returns a list of all dependencies this job is involved with. + * + * @param job JobInterface + * @return DependSeq + */ + DependSeq getDepends(JobInterface job); + + /** + * Returns an array of depends that depend on the specified job. + * + * @param job JobInterface + * @return DependSeq + */ + DependSeq getWhatDependsOnThis(JobInterface job); + + /** + * Returns an array of depends that depend on the specified layer. + * + * @param layer LayerInterface + * @return DependSeq + */ + DependSeq getWhatDependsOnThis(LayerInterface layer); + + /** + * Returns an array of depends that depend on the specified job. + * + * @param frame FrameInterface + * @return DependSeq + */ + DependSeq getWhatDependsOnThis(FrameInterface frame); + + /** + * Returns an array of depends that the specified job is waiting on. + * + * @param job JobInterface + * @return DependSeq + */ + DependSeq getWhatThisDependsOn(JobInterface job); + + /** + * Returns an array of depends that the specified layer is waiting on. + * + * @param layer LayerInterface + * @return DependSeq + */ + DependSeq getWhatThisDependsOn(LayerInterface layer); + + /** + * Returns an array of depends that the specified frame is waiting on. 
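// Read-only sketch (hypothetical helper): capture both directions of a job's dependency
// graph using the whiteboard queries above. The import path for DependSeq is assumed from
// the gRPC-generated classes used elsewhere in Cuebot.
import com.imageworks.spcue.JobInterface;
import com.imageworks.spcue.grpc.depend.DependSeq;

public class JobDependSnapshot {
  public final DependSeq dependsOnThisJob;
  public final DependSeq thisJobDependsOn;

  public JobDependSnapshot(WhiteboardDao whiteboardDao, JobInterface job) {
    this.dependsOnThisJob = whiteboardDao.getWhatDependsOnThis(job);
    this.thisJobDependsOn = whiteboardDao.getWhatThisDependsOn(job);
  }
}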
+ * + * @param frame FrameInterface + * @return DependSeq + */ + DependSeq getWhatThisDependsOn(FrameInterface frame); + + /** + * Returns the specified dependency + * + * @param depend DependInterface + * @return Depend + */ + Depend getDepend(DependInterface depend); + + Filter findFilter(String show, String name); + + Filter findFilter(ShowInterface show, String name); + + Filter getFilter(FilterInterface filter); + + MatcherSeq getMatchers(FilterInterface filter); + + Matcher getMatcher(MatcherInterface matcher); + + ActionSeq getActions(FilterInterface filter); + + Action getAction(ActionInterface action); + + /** + * Returns the frame by unique ID + * + * @param id String + * @return Frame + */ + Frame getFrame(String id); + + /** + * Returns a list of filters by show + * + * @param show ShowInterface + * @return FilterSeq + */ + + FilterSeq getFilters(ShowInterface show); + + /** + * Frame search + * + * @param r FrameSearchInterface + * @return FrameSeq + */ + FrameSeq getFrames(FrameSearchInterface r); + + /** + * Returns a list of layers for the specified job. + * + * @param job JobInterface + * @return LayerSeq + */ + LayerSeq getLayers(JobInterface job); + + /** + * Returns a layer from its unique ID + * + * @param id String + * @return Layer + */ + Layer getLayer(String id); + + /** + * Returns a list of limits for the specified layer. + * + * @param id String + * @return Layer + */ + List getLimits(LayerInterface layer); + + /** + * + * @param group GroupInterface + * @return JobSeq + */ + JobSeq getJobs(GroupInterface group); + + /** + * Finds an active job record based on the name + * + * @param name String + * @return Job + */ + Job findJob(String name); + + /** + * Gets an active job based on the Id + * + * @param id String + * @return Job + */ + Job getJob(String id); + + /** + * returns a subscription by its id + * + * @return Subscription + */ + Subscription getSubscription(String id); + + /** + * Find subscription using the show, facility, and alloc name. + * + * @param show String + * @param alloc String + * @return Subscription + */ + Subscription findSubscription(String show, String alloc); + + /** + * returns a list of subscriptions + * + * @param show ShowInterface + * @return SubscriptionSeq + */ + SubscriptionSeq getSubscriptions(ShowInterface show); + + /** + * returns all subscriptions on the specified allocation + * + * @param alloc AllocationInterface + * @return SubscriptionSeq + */ + SubscriptionSeq getSubscriptions(AllocationInterface alloc); + + /** + * returns a show by Id. + * + * @param id String + * @return Show + */ + Show getShow(String id); + + /** + * returns a show by its name. + * + * @param name String + * @return Show + */ + Show findShow(String name); + + /** + * + * return a list of shows from a whiteboard request + * + * @return ShowSeq + */ + ShowSeq getShows(); + + /** + * returns an allocation by Id. + * + * @param id String + * @return Allocation + */ + Allocation getAllocation(String id); + + /** + * returns a show by its name. 
+ * + * @param name String + * @return Allocation + */ + Allocation findAllocation(String name); + + /** + * + * return the current list of allocations + * + * @return List of Allocations + */ + AllocationSeq getAllocations(); + + /** + * + * return the current list of allocations + * + * @param facility FacilityInterface + * @return List of Allocations + */ + AllocationSeq getAllocations(FacilityInterface facility); + + /** + * + * @param show ShowInterface + * @return Group + */ + Group getRootGroup(ShowInterface show); + + /** + * + * @param id String + * @return Group + */ + Group getGroup(String id); + + /** + * Finds a group by show name and group name + * + * @param show String + * @param group String + * @return Group + */ + Group findGroup(String show, String group); + + /** + * + * + * @param show ShowInterface + * @return GroupSeq + */ + GroupSeq getGroups(ShowInterface show); + + /** + * + * @param group GroupInterface + * @return GroupSeq + */ + GroupSeq getGroups(GroupInterface group); + + /** + * + * @param job String + * @param layer String + * @return Layer + */ + Layer findLayer(String job, String layer); + + /** + * + * @param job String + * @param layer String + * @param frame int + * @return Frame + */ + Frame findFrame(String job, String layer, int frame); + + /** + * returns an UpdatedFrameCheckResult which contains an array of updated frames. + * + * @param job JobInterface + * @param layers List of LayerInterfaces + * @param lastUpdate int + * @return UpdatedFrameCheckResult + */ + UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, + int lastUpdate); + + /** + * + * @param show ShowInterface + * @return DepartmentSeq + */ + DepartmentSeq getDepartments(ShowInterface show); + + /** + * + * @param show ShowInterface + * @param name String + * @return Department + */ + Department getDepartment(ShowInterface show, String name); + + /** + * Returns a list of available department names + * + * @return List of Strings + */ + List getDepartmentNames(); + + /** + * + * @param show ShowInterface + * @param dept DepartmentInterface + * @param shot String + * @return Task + */ + Task getTask(ShowInterface show, DepartmentInterface dept, String shot); + + /** + * + * @param show ShowInterface + * @param dept DepartmentInterface + * @return List of Tasks + */ + TaskSeq getTasks(ShowInterface show, DepartmentInterface dept); + + /** + * Returns procs from a ProcSearchInterface criteria. + * + * @param p ProcSearchInterface + * @return ProcSeq + */ + ProcSeq getProcs(ProcSearchInterface p); + + /** + * Return the grpc representation of the given AbstractDepend. + * + * @param depend AbstractDepend + * @return Depend + */ + Depend getDepend(AbstractDepend depend); + + /** + * Return the Host record for the given Deed. + * + * @param deed DeedEntity + * @return Host + */ + Host getHost(DeedEntity deed); + + /** + * Return the Owner of the given Deed. + * + * @param deed DeedEntity + * @return Owner + */ + Owner getOwner(DeedEntity deed); + + /** + * Return a list of all Deeds controlled by the given Owner. + * + * @param owner OwnerEntity + * @return DeedSeq + */ + DeedSeq getDeeds(OwnerEntity owner); + + /** + * Return a list of all Hosts controlled by the given Owner. + * + * @param owner OwnerEntity + * @return HostSeq + */ + HostSeq getHosts(OwnerEntity owner); + + /** + * Return the Owner of the given host. + * + * @param host HostInterface + * @return Owner + */ + Owner getOwner(HostInterface host); + + /** + * Return the Deed for the given Host. 
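// Sketch (hypothetical helper): resolve a host's ownership data with the owner/deed queries
// declared above and just below. The import paths for the gRPC Owner and Deed types are
// assumed; only getOwner(host) and getDeed(host) come from this interface.
import com.imageworks.spcue.HostInterface;
import com.imageworks.spcue.grpc.host.Deed;
import com.imageworks.spcue.grpc.host.Owner;

public class HostOwnership {
  public final Owner owner;
  public final Deed deed;

  public HostOwnership(WhiteboardDao whiteboardDao, HostInterface host) {
    this.owner = whiteboardDao.getOwner(host);
    this.deed = whiteboardDao.getDeed(host);
  }
}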
+ /**
+ * Return the Deed for the given Host.
+ *
+ * @param host HostInterface
+ * @return Deed
+ */
+ Deed getDeed(HostInterface host);
+
+ /**
+ * Return the owner by name.
+ *
+ * @param name String
+ * @return Owner
+ */
+ Owner getOwner(String name);
+
+ /**
+ * Return a list of owners by show.
+ *
+ * @param show ShowInterface
+ * @return List of Owners
+ */
+ List getOwners(ShowInterface show);
+
+ /**
+ * Return a list of Deeds by show.
+ *
+ * @param show ShowInterface
+ * @return DeedSeq
+ */
+ DeedSeq getDeeds(ShowInterface show);
+
+ /**
+ * Return a RenderPartition from its associated LocalHostAssignment.
+ *
+ * @param l LocalHostAssignment
+ * @return RenderPartition
+ */
+ RenderPartition getRenderPartition(LocalHostAssignment l);
+
+ /**
+ * Return a list of RenderPartitions for the given Host.
+ *
+ * @param host HostInterface
+ * @return RenderPartitionSeq
+ */
+ RenderPartitionSeq getRenderPartitions(HostInterface host);
+
+ /**
+ * Return a facility by name or id.
+ *
+ * @param name String
+ * @return Facility
+ */
+ Facility getFacility(String name);
+
+ /**
+ * Return the full list of facilities.
+ *
+ * @return List of Facilities
+ */
+ FacilitySeq getFacilities();
+
+ /**
+ * Return a list of all active shows.
+ *
+ * @return ShowSeq
+ */
+ ShowSeq getActiveShows();
+
+ /**
+ * Return the given service.
+ *
+ * @param id String
+ * @return Service
+ */
+ Service getService(String id);
+
+ /**
+ * Return the list of cluster-wide service defaults.
+ *
+ * @return ServiceSeq
+ */
+ ServiceSeq getDefaultServices();
+
+ /**
+ * Return the list of service overrides for a particular show.
+ *
+ * @param show ShowInterface
+ * @return List of ServiceOverrides
+ */
+ ServiceOverrideSeq getServiceOverrides(ShowInterface show);
+
+ /**
+ * Return the named service override for the given show.
+ *
+ * @param show ShowInterface
+ * @param name String
+ * @return ServiceOverride
+ */
+ ServiceOverride getServiceOverride(ShowInterface show, String name);
+
+ /**
+ * Find a service by name.
+ *
+ * @param name String
+ * @return Service
+ */
+ Service findService(String name);
+
+ /**
+ * Find a limit by name.
+ *
+ * @param name String
+ * @return Limit
+ */
+ Limit findLimit(String name);
+
+ /**
+ * Return a limit by ID.
+ *
+ * @param id String
+ * @return Limit
+ */
+ Limit getLimit(String id);
+
+ /**
+ * Returns a list of all limits.
+ *
+ * @return List of Limits
+ */
+ List getLimits();
 }
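The criteria classes reformatted below are the search objects that the getFrames and getProcs methods above consume. A minimal sketch of that flow, assuming the factory and DAO instances are provided by the application wiring; the method name, variable names, and literal values are illustrative only, not part of this patch:

    // Illustrative only.
    FrameSeq listFirstFrames(WhiteboardDao whiteboard, FrameSearchFactory frameSearchFactory,
            JobInterface job) {
        FrameSearchCriteria criteria = FrameSearchInterface.criteriaFactory().toBuilder()
                .setFrameRange("1-100") // hypothetical frame range
                .setLimit(50)
                .build();
        FrameSearchInterface search = frameSearchFactory.create(job, criteria);
        return whiteboard.getFrames(search);
    }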
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java
index 8d1e2df8b..4ecb6eb91 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaException.java
@@ -18,23 +18,23 @@
 @SuppressWarnings("serial")
 public class CriteriaException extends RuntimeException {
- public CriteriaException() {
- // TODO Auto-generated constructor stub
- }
-
- public CriteriaException(String message) {
- super(message);
- // TODO Auto-generated constructor stub
- }
-
- public CriteriaException(Throwable cause) {
- super(cause);
- // TODO Auto-generated constructor stub
- }
-
- public CriteriaException(String message, Throwable cause) {
- super(message, cause);
- // TODO Auto-generated constructor stub
- }
+ public CriteriaException() {
+ // TODO Auto-generated constructor stub
+ }
+
+ public CriteriaException(String message) {
+ super(message);
+ // TODO Auto-generated constructor stub
+ }
+
+ public CriteriaException(Throwable cause) {
+ super(cause);
+ // TODO Auto-generated constructor stub
+ }
+
+ public CriteriaException(String message, Throwable cause) {
+ super(message, cause);
+ // TODO Auto-generated constructor stub
+ }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java
index 567e50a32..951abd0c2 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/CriteriaInterface.java
@@ -18,19 +18,19 @@
 import java.util.List;
 public interface CriteriaInterface {
- String toString();
+ String toString();
- void setFirstResult(int firstResult);
+ void setFirstResult(int firstResult);
- void setMaxResults(int maxResults);
+ void setMaxResults(int maxResults);
- void addSort(Sort o);
+ void addSort(Sort o);
- String getWhereClause();
+ String getWhereClause();
- String getFilteredQuery(String query);
+ String getFilteredQuery(String query);
- List getValues();
+ List getValues();
- Object[] getValuesArray();
+ Object[] getValuesArray();
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java
index 7e68b87ee..87a06b895 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Direction.java
@@ -16,5 +16,5 @@
 package com.imageworks.spcue.dao.criteria;
 public enum Direction {
- ASC, DESC
+ ASC, DESC
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java
index 45c40bc55..37dd378ef 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java
@@ -25,55 +25,55 @@
 import com.imageworks.spcue.grpc.job.FrameSearchCriteria;
 public class FrameSearchFactory {
- private DatabaseEngine dbEngine;
+ private DatabaseEngine dbEngine;
- public FrameSearchInterface create() {
- return new FrameSearch();
- }
+ public FrameSearchInterface create() {
+ return new FrameSearch();
+ }
- public FrameSearchInterface create(List
frameIds) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByFrameIds(frameIds); - return frameSearch; - } + public FrameSearchInterface create(List frameIds) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByFrameIds(frameIds); + return frameSearch; + } - public FrameSearchInterface create(JobInterface job) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByJob(job); - return frameSearch; - } + public FrameSearchInterface create(JobInterface job) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByJob(job); + return frameSearch; + } - public FrameSearchInterface create(FrameInterface frame) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByFrame(frame); - return frameSearch; - } + public FrameSearchInterface create(FrameInterface frame) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByFrame(frame); + return frameSearch; + } - public FrameSearchInterface create(JobInterface job, FrameSearchCriteria criteria) { - FrameSearchInterface frameSearch = create(); - frameSearch.setCriteria(criteria); - frameSearch.filterByJob(job); - return frameSearch; - } + public FrameSearchInterface create(JobInterface job, FrameSearchCriteria criteria) { + FrameSearchInterface frameSearch = create(); + frameSearch.setCriteria(criteria); + frameSearch.filterByJob(job); + return frameSearch; + } - public FrameSearchInterface create(LayerInterface layer) { - FrameSearchInterface frameSearch = create(); - frameSearch.filterByLayer(layer); - return frameSearch; - } + public FrameSearchInterface create(LayerInterface layer) { + FrameSearchInterface frameSearch = create(); + frameSearch.filterByLayer(layer); + return frameSearch; + } - public FrameSearchInterface create(LayerInterface layer, FrameSearchCriteria criteria) { - FrameSearchInterface frameSearch = create(); - frameSearch.setCriteria(criteria); - frameSearch.filterByLayer(layer); - return frameSearch; - } + public FrameSearchInterface create(LayerInterface layer, FrameSearchCriteria criteria) { + FrameSearchInterface frameSearch = create(); + frameSearch.setCriteria(criteria); + frameSearch.filterByLayer(layer); + return frameSearch; + } - public DatabaseEngine getDbEngine() { - return dbEngine; - } + public DatabaseEngine getDbEngine() { + return dbEngine; + } - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java index 1d2017f8d..7f85e298c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchInterface.java @@ -24,37 +24,37 @@ import com.imageworks.spcue.grpc.job.FrameState; public interface FrameSearchInterface extends CriteriaInterface { - int DEFAULT_PAGE = 1; - int DEFAULT_LIMIT = 1000; + int DEFAULT_PAGE = 1; + int DEFAULT_LIMIT = 1000; - FrameSearchCriteria getCriteria(); + FrameSearchCriteria getCriteria(); - void setCriteria(FrameSearchCriteria criteria); + void setCriteria(FrameSearchCriteria criteria); - String getSortedQuery(String query); + String getSortedQuery(String query); - void filterByFrameIds(List frameIds); + void filterByFrameIds(List frameIds); - void filterByJob(JobInterface job); + void 
filterByJob(JobInterface job); - void filterByFrame(FrameInterface frame); + void filterByFrame(FrameInterface frame); - void filterByLayer(LayerInterface layer); + void filterByLayer(LayerInterface layer); - void filterByLayers(List layers); + void filterByLayers(List layers); - void filterByFrameStates(List frameStates); + void filterByFrameStates(List frameStates); - void filterByFrameSet(String frameSet); + void filterByFrameSet(String frameSet); - void filterByMemoryRange(String range); + void filterByMemoryRange(String range); - void filterByDurationRange(String range); + void filterByDurationRange(String range); - void filterByChangeDate(int changeDate); + void filterByChangeDate(int changeDate); - static FrameSearchCriteria criteriaFactory() { - return FrameSearchCriteria.newBuilder().setPage(DEFAULT_PAGE).setLimit(DEFAULT_LIMIT) - .setChangeDate(0).build(); - } + static FrameSearchCriteria criteriaFactory() { + return FrameSearchCriteria.newBuilder().setPage(DEFAULT_PAGE).setLimit(DEFAULT_LIMIT) + .setChangeDate(0).build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java index 31039d917..3312da84e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java @@ -22,23 +22,23 @@ public class HostSearchFactory { - private DatabaseEngine dbEngine; + private DatabaseEngine dbEngine; - public HostSearchInterface create(HostSearchCriteria criteria) { - return new HostSearch(criteria); - } + public HostSearchInterface create(HostSearchCriteria criteria) { + return new HostSearch(criteria); + } - public HostSearchInterface create(AllocationEntity allocEntity) { - HostSearchInterface hostSearch = create(HostSearchInterface.criteriaFactory()); - hostSearch.filterByAlloc(allocEntity); - return hostSearch; - } + public HostSearchInterface create(AllocationEntity allocEntity) { + HostSearchInterface hostSearch = create(HostSearchInterface.criteriaFactory()); + hostSearch.filterByAlloc(allocEntity); + return hostSearch; + } - public DatabaseEngine getDbEngine() { - return dbEngine; - } + public DatabaseEngine getDbEngine() { + return dbEngine; + } - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java index 4d162b238..59c1abe39 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchInterface.java @@ -19,11 +19,11 @@ import com.imageworks.spcue.grpc.host.HostSearchCriteria; public interface HostSearchInterface extends CriteriaInterface { - HostSearchCriteria getCriteria(); + HostSearchCriteria getCriteria(); - void filterByAlloc(AllocationInterface alloc); + void filterByAlloc(AllocationInterface alloc); - static HostSearchCriteria criteriaFactory() { - return HostSearchCriteria.newBuilder().build(); - } + static HostSearchCriteria criteriaFactory() { + return HostSearchCriteria.newBuilder().build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java 
b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java index 00ac412ba..03087a7d3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java @@ -21,29 +21,29 @@ import com.imageworks.spcue.dao.criteria.postgres.JobSearch; public class JobSearchFactory { - private DatabaseEngine dbEngine; - - public JobSearchInterface create() { - return new JobSearch(); - } - - public JobSearchInterface create(JobSearchCriteria criteria) { - JobSearchInterface jobSearch = create(); - jobSearch.setCriteria(criteria); - return jobSearch; - } - - public JobSearchInterface create(ShowInterface show) { - JobSearchInterface jobSearch = create(); - jobSearch.filterByShow(show); - return jobSearch; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + private DatabaseEngine dbEngine; + + public JobSearchInterface create() { + return new JobSearch(); + } + + public JobSearchInterface create(JobSearchCriteria criteria) { + JobSearchInterface jobSearch = create(); + jobSearch.setCriteria(criteria); + return jobSearch; + } + + public JobSearchInterface create(ShowInterface show) { + JobSearchInterface jobSearch = create(); + jobSearch.filterByShow(show); + return jobSearch; + } + + public DatabaseEngine getDbEngine() { + return dbEngine; + } + + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java index 8b84891b4..b3fb53a7f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchInterface.java @@ -19,13 +19,13 @@ import com.imageworks.spcue.grpc.job.JobSearchCriteria; public interface JobSearchInterface extends CriteriaInterface { - JobSearchCriteria getCriteria(); + JobSearchCriteria getCriteria(); - void setCriteria(JobSearchCriteria criteria); + void setCriteria(JobSearchCriteria criteria); - void filterByShow(ShowInterface show); + void filterByShow(ShowInterface show); - static JobSearchCriteria criteriaFactory() { - return JobSearchCriteria.newBuilder().setIncludeFinished(false).build(); - } + static JobSearchCriteria criteriaFactory() { + return JobSearchCriteria.newBuilder().setIncludeFinished(false).build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java index c57f43b18..b03ae6fef 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Phrase.java @@ -22,25 +22,25 @@ */ public class Phrase { - private final String column; - private final String comparison; - private final String value; - - public Phrase(String column, String comparison, String value) { - this.column = column; - this.comparison = comparison; - this.value = value; - } - - public String getColumn() { - return column; - } - - public String getComparison() { - return comparison; - } - - public String getValue() { - return value; - } + private final String column; + private final String comparison; + private final String value; + + public Phrase(String column, String comparison, String value) { + this.column = 
column; + this.comparison = comparison; + this.value = value; + } + + public String getColumn() { + return column; + } + + public String getComparison() { + return comparison; + } + + public String getValue() { + return value; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java index abd787538..519a42dd3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java @@ -20,30 +20,30 @@ import com.imageworks.spcue.grpc.host.ProcSearchCriteria; public class ProcSearchFactory { - private DatabaseEngine dbEngine; - - public ProcSearchInterface create() { - return new ProcSearch(); - } - - public ProcSearchInterface create(ProcSearchCriteria criteria) { - ProcSearchInterface procSearch = create(); - procSearch.setCriteria(criteria); - return procSearch; - } - - public ProcSearchInterface create(ProcSearchCriteria criteria, Sort sort) { - ProcSearchInterface procSearch = create(); - procSearch.setCriteria(criteria); - procSearch.addSort(sort); - return procSearch; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + private DatabaseEngine dbEngine; + + public ProcSearchInterface create() { + return new ProcSearch(); + } + + public ProcSearchInterface create(ProcSearchCriteria criteria) { + ProcSearchInterface procSearch = create(); + procSearch.setCriteria(criteria); + return procSearch; + } + + public ProcSearchInterface create(ProcSearchCriteria criteria, Sort sort) { + ProcSearchInterface procSearch = create(); + procSearch.setCriteria(criteria); + procSearch.addSort(sort); + return procSearch; + } + + public DatabaseEngine getDbEngine() { + return dbEngine; + } + + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java index aa092dd09..4a10d385c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchInterface.java @@ -26,29 +26,29 @@ import com.imageworks.spcue.grpc.host.ProcSearchCriteria; public interface ProcSearchInterface extends CriteriaInterface { - ProcSearchCriteria getCriteria(); + ProcSearchCriteria getCriteria(); - void setCriteria(ProcSearchCriteria criteria); + void setCriteria(ProcSearchCriteria criteria); - void notJobs(List jobs); + void notJobs(List jobs); - void notGroups(List groups); + void notGroups(List groups); - void filterByDurationRange(LessThanIntegerSearchCriterion criterion); + void filterByDurationRange(LessThanIntegerSearchCriterion criterion); - void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion); + void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion); - void filterByDurationRange(InRangeIntegerSearchCriterion criterion); + void filterByDurationRange(InRangeIntegerSearchCriterion criterion); - void filterByHost(HostInterface host); + void filterByHost(HostInterface host); - void sortByHostName(); + void sortByHostName(); - void sortByDispatchedTime(); + void sortByDispatchedTime(); - void sortByBookedTime(); + void sortByBookedTime(); - static ProcSearchCriteria 
criteriaFactory() { - return ProcSearchCriteria.newBuilder().build(); - } + static ProcSearchCriteria criteriaFactory() { + return ProcSearchCriteria.newBuilder().build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java index a53a19d77..b089d62da 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/Sort.java @@ -17,27 +17,27 @@ public class Sort { - private final String col; - private final Direction dir; + private final String col; + private final Direction dir; - public Sort(String col, Direction dir) { - this.col = col; - this.dir = dir; - } + public Sort(String col, Direction dir) { + this.col = col; + this.dir = dir; + } - public static final Sort asc(String col) { - return new Sort(col, Direction.ASC); - } + public static final Sort asc(String col) { + return new Sort(col, Direction.ASC); + } - public static final Sort desc(String col) { - return new Sort(col, Direction.DESC); - } + public static final Sort desc(String col) { + return new Sort(col, Direction.DESC); + } - public String getColumn() { - return this.col; - } + public String getColumn() { + return this.col; + } - public Direction getDirection() { - return this.dir; - } + public Direction getDirection() { + return this.dir; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java index 904fcfcd8..7b89e0454 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/Criteria.java @@ -38,288 +38,289 @@ public abstract class Criteria implements CriteriaInterface { - List chunks = new ArrayList(12); - List values = new ArrayList(32); - Integer limit; - - boolean built = false; - private int firstResult = 1; - private int maxResults = 0; - private ArrayList order = new ArrayList(); - - abstract void buildWhereClause(); - - public String toString() { - return this.getWhereClause(); - } - - public void setFirstResult(int firstResult) { - this.firstResult = Math.max(firstResult, 1); - } - - public void setMaxResults(int maxResults) { - this.maxResults = maxResults; - } - - public void addSort(Sort sort) { - this.order.add(sort); - } - - public List getValues() { - return values; - } - - public Object[] getValuesArray() { - return values.toArray(); - } - - public String getWhereClause() { - build(); - return generateWhereClause(); - } - - public String getFilteredQuery(String query) { - build(); - return queryWithPaging(query); - } - - private void build() { - if (!built) { - buildWhereClause(); + List chunks = new ArrayList(12); + List values = new ArrayList(32); + Integer limit; + + boolean built = false; + private int firstResult = 1; + private int maxResults = 0; + private ArrayList order = new ArrayList(); + + abstract void buildWhereClause(); + + public String toString() { + return this.getWhereClause(); + } + + public void setFirstResult(int firstResult) { + this.firstResult = Math.max(firstResult, 1); + } + + public void setMaxResults(int maxResults) { + this.maxResults = maxResults; + } + + public void addSort(Sort sort) { + this.order.add(sort); + } + + public List getValues() { + return values; + } + + public Object[] getValuesArray() { + return values.toArray(); } - built = true; - } - - private String 
generateWhereClause() { - return chunks.stream().map(StringBuilder::toString).collect(Collectors.joining(" AND ")); - } - - private String queryWithPaging(String query) { - if (firstResult > 1 || maxResults > 0) { - if (order.size() == 0) { - query = query.replaceFirst("SELECT ", "SELECT row_number() OVER () AS RN,"); - } else { - query = - query.replaceFirst("SELECT ", "SELECT row_number() OVER (" + getOrder() + ") AS RN, "); - } + + public String getWhereClause() { + build(); + return generateWhereClause(); } - StringBuilder sb = new StringBuilder(4096); - if (maxResults > 0 || firstResult > 1) { - sb.append("SELECT * FROM ( "); + public String getFilteredQuery(String query) { + build(); + return queryWithPaging(query); } - sb.append(query); - sb.append(" "); - if (chunks.size() > 0) { - sb.append("AND "); - sb.append(chunks.stream().map(StringBuilder::toString).collect(Collectors.joining(" AND "))); + private void build() { + if (!built) { + buildWhereClause(); + } + built = true; } - if (firstResult > 1 || maxResults > 0) { - sb.append(") AS getQueryT WHERE "); + private String generateWhereClause() { + return chunks.stream().map(StringBuilder::toString).collect(Collectors.joining(" AND ")); } - if (firstResult > 1) { - sb.append(" RN >= ? "); - values.add(firstResult); + private String queryWithPaging(String query) { + if (firstResult > 1 || maxResults > 0) { + if (order.size() == 0) { + query = query.replaceFirst("SELECT ", "SELECT row_number() OVER () AS RN,"); + } else { + query = query.replaceFirst("SELECT ", + "SELECT row_number() OVER (" + getOrder() + ") AS RN, "); + } + } + + StringBuilder sb = new StringBuilder(4096); + if (maxResults > 0 || firstResult > 1) { + sb.append("SELECT * FROM ( "); + } + + sb.append(query); + sb.append(" "); + if (chunks.size() > 0) { + sb.append("AND "); + sb.append(chunks.stream().map(StringBuilder::toString) + .collect(Collectors.joining(" AND "))); + } + + if (firstResult > 1 || maxResults > 0) { + sb.append(") AS getQueryT WHERE "); + } + + if (firstResult > 1) { + sb.append(" RN >= ? "); + values.add(firstResult); + } + + if (maxResults > 0) { + if (firstResult > 1) { + sb.append(" AND "); + } + sb.append(" RN < ? "); + values.add(firstResult + maxResults); + } + + if (limit != null) { + sb.append(" LIMIT "); + sb.append(limit); + sb.append(" "); + } + + return sb.toString(); } - if (maxResults > 0) { - if (firstResult > 1) { - sb.append(" AND "); - } - sb.append(" RN < ? 
"); - values.add(firstResult + maxResults); + private String getOrder() { + if (order.size() < 1) { + return ""; + } + return " ORDER BY " + order.stream() + .map(sort -> sort.getColumn() + " " + sort.getDirection().toString()) + .collect(Collectors.joining(", ")); } - if (limit != null) { - sb.append(" LIMIT "); - sb.append(limit); - sb.append(" "); + void addPhrase(String col, Collection s) { + if (s == null || s.size() == 0) { + return; + } + + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (String w : s) { + sb.append(col); + sb.append("=?"); + sb.append(" OR "); + values.add(w); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); } - return sb.toString(); - } + void addPhrases(Collection phrases, String inclusion) { + if (phrases.size() == 0) { + return; + } + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (Phrase p : phrases) { + sb.append(p.getColumn()); + sb.append(p.getComparison()); + sb.append("?"); + sb.append(" "); + sb.append(inclusion); + sb.append(" "); + values.add(p.getValue()); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); + } - private String getOrder() { - if (order.size() < 1) { - return ""; + void addPhrase(String col, String v) { + if (v == null) { + return; + } + addPhrase(col, ImmutableList.of(v)); } - return " ORDER BY " - + order.stream().map(sort -> sort.getColumn() + " " + sort.getDirection().toString()) - .collect(Collectors.joining(", ")); - } - - void addPhrase(String col, Collection s) { - if (s == null || s.size() == 0) { - return; + + void addRegexPhrase(String col, Set s) { + if (s == null) { + return; + } + if (s.size() == 0) { + return; + } + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (String w : s) { + sb.append(String.format("%s ~ ?", col)); + sb.append(" OR "); + values.add(w); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w : s) { - sb.append(col); - sb.append("=?"); - sb.append(" OR "); - values.add(w); + void addLikePhrase(String col, Set s) { + if (s == null) { + return; + } + if (s.size() == 0) { + return; + } + StringBuilder sb = new StringBuilder(1024); + sb.append("("); + for (String w : s) { + sb.append(col); + sb.append(" LIKE ?"); + sb.append(" OR "); + values.add("%" + w + "%"); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(")"); + chunks.add(sb); } - sb.delete(sb.length() - 4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addPhrases(Collection phrases, String inclusion) { - if (phrases.size() == 0) { - return; + + void addGreaterThanTimestamp(String col, Timestamp timestamp) { + if (timestamp == null) { + return; + } + StringBuilder sb = new StringBuilder(128); + sb.append("("); + sb.append(col); + sb.append(" > ?"); + sb.append(") "); + values.add(timestamp); + chunks.add(sb); } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (Phrase p : phrases) { - sb.append(p.getColumn()); - sb.append(p.getComparison()); - sb.append("?"); - sb.append(" "); - sb.append(inclusion); - sb.append(" "); - values.add(p.getValue()); + + void addLessThanTimestamp(String col, Timestamp timestamp) { + if (timestamp == null) { + return; + } + StringBuilder sb = new StringBuilder(128); + sb.append("("); + sb.append(col); + sb.append(" < ?"); + sb.append(") "); + values.add(timestamp); + chunks.add(sb); } - sb.delete(sb.length() - 4, sb.length()); - 
sb.append(")"); - chunks.add(sb); - } - - void addPhrase(String col, String v) { - if (v == null) { - return; + + void addRangePhrase(String col, EqualsIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " = ?"); + chunks.add(sb); + values.add(criterion.getValue()); } - addPhrase(col, ImmutableList.of(v)); - } - void addRegexPhrase(String col, Set s) { - if (s == null) { - return; + void addRangePhrase(String col, LessThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + "<=? "); + chunks.add(sb); + values.add(criterion.getValue()); } - if (s.size() == 0) { - return; + + void addRangePhrase(String col, GreaterThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? "); + chunks.add(sb); + values.add(criterion.getValue()); } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w : s) { - sb.append(String.format("%s ~ ?", col)); - sb.append(" OR "); - values.add(w); + + void addRangePhrase(String col, InRangeIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? AND " + col + " <= ? "); + chunks.add(sb); + values.add(criterion.getMin()); + values.add(criterion.getMax()); } - sb.delete(sb.length() - 4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addLikePhrase(String col, Set s) { - if (s == null) { - return; + + void addRangePhrase(String col, EqualsFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " = ?"); + chunks.add(sb); + values.add(criterion.getValue()); } - if (s.size() == 0) { - return; + + void addRangePhrase(String col, LessThanFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " <= ? "); + chunks.add(sb); + values.add(criterion.getValue()); } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w : s) { - sb.append(col); - sb.append(" LIKE ?"); - sb.append(" OR "); - values.add("%" + w + "%"); + + void addRangePhrase(String col, GreaterThanFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? "); + chunks.add(sb); + values.add(criterion.getValue()); } - sb.delete(sb.length() - 4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addGreaterThanTimestamp(String col, Timestamp timestamp) { - if (timestamp == null) { - return; + + void addRangePhrase(String col, InRangeFloatSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" " + col + " >= ? 
"); + chunks.add(sb); + values.add(criterion.getMin()); + values.add(criterion.getMax()); } - StringBuilder sb = new StringBuilder(128); - sb.append("("); - sb.append(col); - sb.append(" > ?"); - sb.append(") "); - values.add(timestamp); - chunks.add(sb); - } - - void addLessThanTimestamp(String col, Timestamp timestamp) { - if (timestamp == null) { - return; + + boolean isValid(String v) { + return v != null && !v.isEmpty(); } - StringBuilder sb = new StringBuilder(128); - sb.append("("); - sb.append(col); - sb.append(" < ?"); - sb.append(") "); - values.add(timestamp); - chunks.add(sb); - } - - void addRangePhrase(String col, EqualsIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " = ?"); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, LessThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + "<=? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, GreaterThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, InRangeIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? AND " + col + " <= ? "); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - void addRangePhrase(String col, EqualsFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " = ?"); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, LessThanFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " <= ? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, GreaterThanFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, InRangeFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? 
"); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - boolean isValid(String v) { - return v != null && !v.isEmpty(); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java index 198e0899d..22b83044c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java @@ -34,189 +34,190 @@ import com.imageworks.spcue.util.FrameSet; public class FrameSearch extends Criteria implements FrameSearchInterface { - private static final int MAX_RESULTS = 1000; - private static final Logger logger = LogManager.getLogger(FrameSearch.class); - private static final Pattern PATTERN_SINGLE_FRAME = Pattern.compile("^([0-9]+)$"); - private static final Pattern PATTERN_RANGE = Pattern.compile("^([0-9]+)\\-([0-9]+)$"); - private static final Pattern PATTERN_FLOAT_RANGE = Pattern.compile("^([0-9\\.]+)\\-([0-9\\.]+)$"); - private static final int RANGE_MAX_SIZE = 1000; - - private FrameSearchCriteria criteria; - private String sortedQuery; - - public FrameSearch() { - criteria = FrameSearchInterface.criteriaFactory(); - } - - @Override - public FrameSearchCriteria getCriteria() { - return criteria; - } - - @Override - public void setCriteria(FrameSearchCriteria criteria) { - this.criteria = criteria; - } - - @Override - public String getSortedQuery(String query) { - if (built) { - return sortedQuery; + private static final int MAX_RESULTS = 1000; + private static final Logger logger = LogManager.getLogger(FrameSearch.class); + private static final Pattern PATTERN_SINGLE_FRAME = Pattern.compile("^([0-9]+)$"); + private static final Pattern PATTERN_RANGE = Pattern.compile("^([0-9]+)\\-([0-9]+)$"); + private static final Pattern PATTERN_FLOAT_RANGE = + Pattern.compile("^([0-9\\.]+)\\-([0-9\\.]+)$"); + private static final int RANGE_MAX_SIZE = 1000; + + private FrameSearchCriteria criteria; + private String sortedQuery; + + public FrameSearch() { + criteria = FrameSearchInterface.criteriaFactory(); } - int limit = criteria.getLimit(); - int page = criteria.getPage(); - - if (limit <= 0 || limit >= MAX_RESULTS) { - criteria = criteria.toBuilder().setLimit(MAX_RESULTS).build(); + @Override + public FrameSearchCriteria getCriteria() { + return criteria; } - if (page <= 0) { - page = 1; + + @Override + public void setCriteria(FrameSearchCriteria criteria) { + this.criteria = criteria; } - StringBuilder sb = new StringBuilder(query.length() + 256); - sb.append("SELECT * FROM ("); - sb.append(getFilteredQuery(query)); - sb.append(" ) AS getSortedQueryT WHERE row_number > ?"); - sb.append(" AND row_number <= ?"); - values.add((page - 1) * limit); - values.add(page * limit); - sortedQuery = sb.toString(); - return sortedQuery; - } - - @Override - public void filterByFrameIds(List frameIds) { - criteria = criteria.toBuilder().addAllIds(frameIds).build(); - } - - @Override - public void filterByJob(JobInterface job) { - addPhrase("job.pk_job", job.getJobId()); - } - - @Override - public void filterByFrame(FrameInterface frame) { - filterByFrameIds(ImmutableList.of(frame.getFrameId())); - } - - @Override - public void filterByLayer(LayerInterface layer) { - addPhrase("layer.pk_layer", layer.getLayerId()); - } - - @Override - public void filterByLayers(List layers) { - addPhrase("layer.pk_layer", - 
layers.stream().map(LayerInterface::getLayerId).collect(Collectors.toList())); - } - - @Override - public void filterByFrameStates(List frameStates) { - addPhrase("frame.str_state", - frameStates.stream().map(FrameState::toString).collect(Collectors.toSet())); - } - - @Override - public void filterByFrameSet(String frameSet) { - StringBuilder sb = new StringBuilder(8096); - Matcher matchRange = PATTERN_RANGE.matcher(frameSet); - Matcher matchSingle = PATTERN_SINGLE_FRAME.matcher(frameSet); - - if (matchSingle.matches()) { - sb.append("frame.int_number=?"); - values.add(Integer.valueOf(matchSingle.group(1))); - } else if (matchRange.matches()) { - sb.append(" ( frame.int_number >= ? AND "); - sb.append(" frame.int_number <= ? )"); - values.add(Integer.valueOf(matchRange.group(1))); - values.add(Integer.valueOf(matchRange.group(2))); - } else { - FrameSet set = new FrameSet(frameSet); - int num_frames = set.size(); - if (num_frames <= RANGE_MAX_SIZE) { - sb.append("("); - for (int i = 0; i < num_frames; i++) { - sb.append("frame.int_number=? OR "); - values.add(set.get(i)); + @Override + public String getSortedQuery(String query) { + if (built) { + return sortedQuery; + } + + int limit = criteria.getLimit(); + int page = criteria.getPage(); + + if (limit <= 0 || limit >= MAX_RESULTS) { + criteria = criteria.toBuilder().setLimit(MAX_RESULTS).build(); + } + if (page <= 0) { + page = 1; } - sb.delete(sb.length() - 4, sb.length()); - sb.append(") "); - } + + StringBuilder sb = new StringBuilder(query.length() + 256); + sb.append("SELECT * FROM ("); + sb.append(getFilteredQuery(query)); + sb.append(" ) AS getSortedQueryT WHERE row_number > ?"); + sb.append(" AND row_number <= ?"); + values.add((page - 1) * limit); + values.add(page * limit); + sortedQuery = sb.toString(); + return sortedQuery; + } + + @Override + public void filterByFrameIds(List frameIds) { + criteria = criteria.toBuilder().addAllIds(frameIds).build(); } - chunks.add(sb); - } - - @Override - public void filterByMemoryRange(String range) { - StringBuilder sb = new StringBuilder(128); - Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); - try { - if (matchRange.matches()) { - values.add(CueUtil.GB * Float.valueOf(matchRange.group(1))); - values.add(CueUtil.GB * Float.valueOf(matchRange.group(2))); - sb.append(" (frame.int_mem_max_used >= ? AND frame.int_mem_max_used <= ?) "); - } else { - values.add(CueUtil.GB * Float.valueOf(range)); - sb.append(" frame.int_mem_max_used >= ? "); - } - } catch (RuntimeException e) { - logger.warn("Failed to convert float range: " + range + "," + e); + + @Override + public void filterByJob(JobInterface job) { + addPhrase("job.pk_job", job.getJobId()); } - chunks.add(sb); - } - - @Override - public void filterByDurationRange(String range) { - StringBuilder sb = new StringBuilder(128); - Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); - try { - if (matchRange.matches()) { - values.add((int) (3600 * Float.valueOf(matchRange.group(1)))); - values.add((int) (3600 * Float.valueOf(matchRange.group(2)))); - sb.append(" (frame.str_state != 'WAITING' "); - sb.append(" AND find_duration(frame.ts_started, frame.ts_stopped) "); - sb.append(" BETWEEN ? AND ? )"); - } else { - values.add((int) (3600 * Float.valueOf(range))); - sb.append(" (frame.str_state != 'WAITING' AND "); - sb.append("find_duration(frame.ts_started, frame.ts_stopped) >= ?) "); - } - } catch (RuntimeException e) { - logger.warn("Failed to convert float range: " + range + "," + e); - // a cast failed, ignore for now. 
+ + @Override + public void filterByFrame(FrameInterface frame) { + filterByFrameIds(ImmutableList.of(frame.getFrameId())); } - System.out.println(sb.toString()); - System.out.println(values); - chunks.add(sb); - } - - @Override - public void filterByChangeDate(int changeDate) { - StringBuilder sb = new StringBuilder(); - sb.append("frame.ts_updated > ?"); - chunks.add(sb); - values.add(new java.sql.Timestamp(changeDate * 1000L)); - } - - @Override - void buildWhereClause() { - addPhrase("frame.pk_frame", criteria.getIdsList()); - - addPhrase("frame.str_name", criteria.getFramesList()); - addPhrase("layer.str_name", criteria.getLayersList()); - filterByFrameStates(criteria.getStates().getFrameStatesList()); - if (isValid(criteria.getFrameRange())) { - filterByFrameSet(criteria.getFrameRange()); + + @Override + public void filterByLayer(LayerInterface layer) { + addPhrase("layer.pk_layer", layer.getLayerId()); } - if (isValid(criteria.getMemoryRange())) { - filterByMemoryRange(criteria.getMemoryRange()); + + @Override + public void filterByLayers(List layers) { + addPhrase("layer.pk_layer", + layers.stream().map(LayerInterface::getLayerId).collect(Collectors.toList())); } - if (isValid(criteria.getDurationRange())) { - filterByDurationRange(criteria.getDurationRange()); + + @Override + public void filterByFrameStates(List frameStates) { + addPhrase("frame.str_state", + frameStates.stream().map(FrameState::toString).collect(Collectors.toSet())); } - if (criteria.getChangeDate() > 0) { - filterByChangeDate(criteria.getChangeDate()); + + @Override + public void filterByFrameSet(String frameSet) { + StringBuilder sb = new StringBuilder(8096); + Matcher matchRange = PATTERN_RANGE.matcher(frameSet); + Matcher matchSingle = PATTERN_SINGLE_FRAME.matcher(frameSet); + + if (matchSingle.matches()) { + sb.append("frame.int_number=?"); + values.add(Integer.valueOf(matchSingle.group(1))); + } else if (matchRange.matches()) { + sb.append(" ( frame.int_number >= ? AND "); + sb.append(" frame.int_number <= ? )"); + values.add(Integer.valueOf(matchRange.group(1))); + values.add(Integer.valueOf(matchRange.group(2))); + } else { + FrameSet set = new FrameSet(frameSet); + int num_frames = set.size(); + if (num_frames <= RANGE_MAX_SIZE) { + sb.append("("); + for (int i = 0; i < num_frames; i++) { + sb.append("frame.int_number=? OR "); + values.add(set.get(i)); + } + sb.delete(sb.length() - 4, sb.length()); + sb.append(") "); + } + } + chunks.add(sb); + } + + @Override + public void filterByMemoryRange(String range) { + StringBuilder sb = new StringBuilder(128); + Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); + try { + if (matchRange.matches()) { + values.add(CueUtil.GB * Float.valueOf(matchRange.group(1))); + values.add(CueUtil.GB * Float.valueOf(matchRange.group(2))); + sb.append(" (frame.int_mem_max_used >= ? AND frame.int_mem_max_used <= ?) "); + } else { + values.add(CueUtil.GB * Float.valueOf(range)); + sb.append(" frame.int_mem_max_used >= ? 
"); + } + } catch (RuntimeException e) { + logger.warn("Failed to convert float range: " + range + "," + e); + } + chunks.add(sb); + } + + @Override + public void filterByDurationRange(String range) { + StringBuilder sb = new StringBuilder(128); + Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); + try { + if (matchRange.matches()) { + values.add((int) (3600 * Float.valueOf(matchRange.group(1)))); + values.add((int) (3600 * Float.valueOf(matchRange.group(2)))); + sb.append(" (frame.str_state != 'WAITING' "); + sb.append(" AND find_duration(frame.ts_started, frame.ts_stopped) "); + sb.append(" BETWEEN ? AND ? )"); + } else { + values.add((int) (3600 * Float.valueOf(range))); + sb.append(" (frame.str_state != 'WAITING' AND "); + sb.append("find_duration(frame.ts_started, frame.ts_stopped) >= ?) "); + } + } catch (RuntimeException e) { + logger.warn("Failed to convert float range: " + range + "," + e); + // a cast failed, ignore for now. + } + System.out.println(sb.toString()); + System.out.println(values); + chunks.add(sb); + } + + @Override + public void filterByChangeDate(int changeDate) { + StringBuilder sb = new StringBuilder(); + sb.append("frame.ts_updated > ?"); + chunks.add(sb); + values.add(new java.sql.Timestamp(changeDate * 1000L)); + } + + @Override + void buildWhereClause() { + addPhrase("frame.pk_frame", criteria.getIdsList()); + + addPhrase("frame.str_name", criteria.getFramesList()); + addPhrase("layer.str_name", criteria.getLayersList()); + filterByFrameStates(criteria.getStates().getFrameStatesList()); + if (isValid(criteria.getFrameRange())) { + filterByFrameSet(criteria.getFrameRange()); + } + if (isValid(criteria.getMemoryRange())) { + filterByMemoryRange(criteria.getMemoryRange()); + } + if (isValid(criteria.getDurationRange())) { + filterByDurationRange(criteria.getDurationRange()); + } + if (criteria.getChangeDate() > 0) { + filterByChangeDate(criteria.getChangeDate()); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java index f7a2dc63e..6fdcb4628 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java @@ -24,31 +24,31 @@ import com.imageworks.spcue.grpc.host.HostSearchCriteria; public class HostSearch extends Criteria implements HostSearchInterface { - private HostSearchCriteria criteria; - - public HostSearch(HostSearchCriteria criteria) { - this.criteria = criteria; - } - - public HostSearchCriteria getCriteria() { - return this.criteria; - } - - public void filterByAlloc(AllocationInterface alloc) { - addPhrase("host.pk_alloc", alloc.getAllocationId()); - } - - @Override - public void buildWhereClause() { - addPhrase("host.pk_host", criteria.getIdsList()); - addPhrase("host.str_name", criteria.getHostsList()); - addLikePhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); - addRegexPhrase("host.str_name", new HashSet<>(criteria.getRegexList())); - addPhrase("alloc.str_name", criteria.getAllocsList()); - Set items = new HashSet<>(criteria.getStates().getStateCount()); - for (HardwareState w : criteria.getStates().getStateList()) { - items.add(w.toString()); + private HostSearchCriteria criteria; + + public HostSearch(HostSearchCriteria criteria) { + this.criteria = criteria; + } + + public HostSearchCriteria getCriteria() { + return this.criteria; + } + + public void 
filterByAlloc(AllocationInterface alloc) { + addPhrase("host.pk_alloc", alloc.getAllocationId()); + } + + @Override + public void buildWhereClause() { + addPhrase("host.pk_host", criteria.getIdsList()); + addPhrase("host.str_name", criteria.getHostsList()); + addLikePhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); + addRegexPhrase("host.str_name", new HashSet<>(criteria.getRegexList())); + addPhrase("alloc.str_name", criteria.getAllocsList()); + Set items = new HashSet<>(criteria.getStates().getStateCount()); + for (HardwareState w : criteria.getStates().getStateList()) { + items.add(w.toString()); + } + addPhrase("host_stat.str_state", items); } - addPhrase("host_stat.str_state", items); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java index b69545791..44e87d753 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/JobSearch.java @@ -22,38 +22,38 @@ import com.imageworks.spcue.grpc.job.JobSearchCriteria; public final class JobSearch extends Criteria implements JobSearchInterface { - private JobSearchCriteria criteria; - - public JobSearch() { - criteria = JobSearchInterface.criteriaFactory(); - } - - @Override - public JobSearchCriteria getCriteria() { - return criteria; - } - - @Override - public void setCriteria(JobSearchCriteria criteria) { - this.criteria = criteria; - } - - @Override - public void filterByShow(ShowInterface show) { - addPhrase("job.pk_show", show.getShowId()); - } - - @Override - void buildWhereClause() { - addPhrase("job.pk_job", criteria.getIdsList()); - addPhrase("job.str_name", criteria.getJobsList()); - addLikePhrase("job.str_name", new HashSet<>(criteria.getSubstrList())); - addRegexPhrase("job.str_name", new HashSet<>(criteria.getRegexList())); - addPhrase("job.str_shot", criteria.getShotsList()); - addPhrase("show.str_name", criteria.getShowsList()); - addPhrase("job.str_user", criteria.getUsersList()); - if (!criteria.getIncludeFinished()) { - addPhrase("job.str_state", "PENDING"); + private JobSearchCriteria criteria; + + public JobSearch() { + criteria = JobSearchInterface.criteriaFactory(); + } + + @Override + public JobSearchCriteria getCriteria() { + return criteria; + } + + @Override + public void setCriteria(JobSearchCriteria criteria) { + this.criteria = criteria; + } + + @Override + public void filterByShow(ShowInterface show) { + addPhrase("job.pk_show", show.getShowId()); + } + + @Override + void buildWhereClause() { + addPhrase("job.pk_job", criteria.getIdsList()); + addPhrase("job.str_name", criteria.getJobsList()); + addLikePhrase("job.str_name", new HashSet<>(criteria.getSubstrList())); + addRegexPhrase("job.str_name", new HashSet<>(criteria.getRegexList())); + addPhrase("job.str_shot", criteria.getShotsList()); + addPhrase("show.str_name", criteria.getShowsList()); + addPhrase("job.str_user", criteria.getUsersList()); + if (!criteria.getIncludeFinished()) { + addPhrase("job.str_state", "PENDING"); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java index 038de725a..a37309b18 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/ProcSearch.java @@ 
-32,102 +32,102 @@ public class ProcSearch extends Criteria implements ProcSearchInterface { - private ProcSearchCriteria criteria; - private Set notJobs = new HashSet<>(); - private Set notGroups = new HashSet<>(); + private ProcSearchCriteria criteria; + private Set notJobs = new HashSet<>(); + private Set notGroups = new HashSet<>(); - public ProcSearch() { - criteria = ProcSearchInterface.criteriaFactory(); - } + public ProcSearch() { + criteria = ProcSearchInterface.criteriaFactory(); + } + + public ProcSearchCriteria getCriteria() { + return criteria; + } + + public void setCriteria(ProcSearchCriteria criteria) { + this.criteria = criteria; + } - public ProcSearchCriteria getCriteria() { - return criteria; - } + public void notJobs(List jobs) { + for (JobInterface job : jobs) { + notJobs.add(new Phrase("proc.pk_job", "!=", job.getJobId())); + } + } - public void setCriteria(ProcSearchCriteria criteria) { - this.criteria = criteria; - } + public void notGroups(List groups) { + for (GroupInterface group : groups) { + notGroups.add(new Phrase("folder.pk_folder", "!=", group.getGroupId())); + } + } - public void notJobs(List jobs) { - for (JobInterface job : jobs) { - notJobs.add(new Phrase("proc.pk_job", "!=", job.getJobId())); + public void filterByDurationRange(LessThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" (find_duration(proc.ts_dispatched, null) <= ?) "); + chunks.add(sb); + values.add(criterion.getValue()); } - } - public void notGroups(List groups) { - for (GroupInterface group : groups) { - notGroups.add(new Phrase("folder.pk_folder", "!=", group.getGroupId())); + public void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" (find_duration(proc.ts_dispatched, null) >= ?) "); + chunks.add(sb); + values.add(criterion.getValue()); } - } - - public void filterByDurationRange(LessThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) <= ?) "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - public void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) >= ?) "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - public void filterByDurationRange(InRangeIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) BETWEEN ? AND ? 
)"); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - public void filterByHost(HostInterface host) { - addPhrase("host.pk_host", host.getHostId()); - } - - public void sortByHostName() { - addSort(Sort.asc("host.str_name")); - } - - public void sortByDispatchedTime() { - addSort(Sort.asc("proc.ts_dispatched")); - } - - public void sortByBookedTime() { - addSort(Sort.asc("proc.ts_booked")); - } - - @Override - void buildWhereClause() { - addPhrases(notJobs, "AND"); - addPhrases(notGroups, "AND"); - - addPhrase("host.str_name", criteria.getHostsList()); - addPhrase("job.str_name", criteria.getJobsList()); - addPhrase("layer.str_name", criteria.getLayersList()); - addPhrase("show.str_name", criteria.getShowsList()); - addPhrase("alloc.str_name", criteria.getAllocsList()); - - if (criteria.getMemoryRangeCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryRange(0)); + + public void filterByDurationRange(InRangeIntegerSearchCriterion criterion) { + StringBuilder sb = new StringBuilder(128); + sb.append(" (find_duration(proc.ts_dispatched, null) BETWEEN ? AND ? )"); + chunks.add(sb); + values.add(criterion.getMin()); + values.add(criterion.getMax()); + } + + public void filterByHost(HostInterface host) { + addPhrase("host.pk_host", host.getHostId()); } - if (criteria.getMemoryLessThanCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryLessThan(0)); + public void sortByHostName() { + addSort(Sort.asc("host.str_name")); } - if (criteria.getMemoryGreaterThanCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryGreaterThan(0)); + public void sortByDispatchedTime() { + addSort(Sort.asc("proc.ts_dispatched")); } - if (criteria.getDurationRangeCount() > 0) { - filterByDurationRange(criteria.getDurationRange(0)); + public void sortByBookedTime() { + addSort(Sort.asc("proc.ts_booked")); } - setFirstResult(criteria.getFirstResult()); - if (criteria.getMaxResultsCount() > 0) { - setMaxResults(criteria.getMaxResults(0)); + @Override + void buildWhereClause() { + addPhrases(notJobs, "AND"); + addPhrases(notGroups, "AND"); + + addPhrase("host.str_name", criteria.getHostsList()); + addPhrase("job.str_name", criteria.getJobsList()); + addPhrase("layer.str_name", criteria.getLayersList()); + addPhrase("show.str_name", criteria.getShowsList()); + addPhrase("alloc.str_name", criteria.getAllocsList()); + + if (criteria.getMemoryRangeCount() > 0) { + addRangePhrase("proc.int_mem_reserved", criteria.getMemoryRange(0)); + } + + if (criteria.getMemoryLessThanCount() > 0) { + addRangePhrase("proc.int_mem_reserved", criteria.getMemoryLessThan(0)); + } + + if (criteria.getMemoryGreaterThanCount() > 0) { + addRangePhrase("proc.int_mem_reserved", criteria.getMemoryGreaterThan(0)); + } + + if (criteria.getDurationRangeCount() > 0) { + filterByDurationRange(criteria.getDurationRange(0)); + } + + setFirstResult(criteria.getFirstResult()); + if (criteria.getMaxResultsCount() > 0) { + setMaxResults(criteria.getMaxResults(0)); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java index 96059b6a1..d3a0c239c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ActionDaoJdbc.java @@ -34,111 +34,112 @@ public class ActionDaoJdbc extends JdbcDaoSupport implements ActionDao { - public static final String 
INSERT_ACTION = "INSERT INTO " + "action " + "(" - + "pk_action,pk_filter,str_action,str_value_type,b_stop" + ") VALUES (?,?,?,?,?)"; - - public void createAction(ActionEntity action) { - action.id = SqlUtil.genKeyRandom(); - boolean stopAction = ActionType.STOP_PROCESSING.equals(action.type); - getJdbcTemplate().update(INSERT_ACTION, action.id, action.filterId, action.type.toString(), - action.valueType.toString(), stopAction); - updateAction(action); - } - - private static final String GET_ACTION = "SELECT " + "action.*," + "filter.pk_show " + "FROM " - + "action," + "filter " + "WHERE " + "action.pk_filter = filter.pk_filter"; - - public ActionEntity getAction(String id) { - return getJdbcTemplate().queryForObject(GET_ACTION + " AND pk_action=?", ACTION_DETAIL_MAPPER, - id); - } - - public ActionEntity getAction(ActionInterface action) { - return getJdbcTemplate().queryForObject(GET_ACTION + " AND pk_action=?", ACTION_DETAIL_MAPPER, - action.getActionId()); - } - - public List getActions(FilterInterface filter) { - return getJdbcTemplate().query( - GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC", - ACTION_DETAIL_MAPPER, filter.getFilterId()); - } - - public void updateAction(ActionEntity action) { - if (action.isNew()) { - throw new SpcueRuntimeException("unable to update action that is not already commited"); + public static final String INSERT_ACTION = "INSERT INTO " + "action " + "(" + + "pk_action,pk_filter,str_action,str_value_type,b_stop" + ") VALUES (?,?,?,?,?)"; + + public void createAction(ActionEntity action) { + action.id = SqlUtil.genKeyRandom(); + boolean stopAction = ActionType.STOP_PROCESSING.equals(action.type); + getJdbcTemplate().update(INSERT_ACTION, action.id, action.filterId, action.type.toString(), + action.valueType.toString(), stopAction); + updateAction(action); } - // first we clear out all values + private static final String GET_ACTION = "SELECT " + "action.*," + "filter.pk_show " + "FROM " + + "action," + "filter " + "WHERE " + "action.pk_filter = filter.pk_filter"; - getJdbcTemplate().update( - "UPDATE action SET str_value=NULL,int_value=NULL,b_value=NULL,float_value=NULL WHERE pk_action=?", - action.getActionId()); + public ActionEntity getAction(String id) { + return getJdbcTemplate().queryForObject(GET_ACTION + " AND pk_action=?", + ACTION_DETAIL_MAPPER, id); + } + + public ActionEntity getAction(ActionInterface action) { + return getJdbcTemplate().queryForObject(GET_ACTION + " AND pk_action=?", + ACTION_DETAIL_MAPPER, action.getActionId()); + } + + public List getActions(FilterInterface filter) { + return getJdbcTemplate().query( + GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC", + ACTION_DETAIL_MAPPER, filter.getFilterId()); + } + + public void updateAction(ActionEntity action) { + if (action.isNew()) { + throw new SpcueRuntimeException("unable to update action that is not already commited"); + } - StringBuilder query = new StringBuilder(1024); - query.append("UPDATE action SET str_action=?,str_value_type=?"); + // first we clear out all values - List args = new ArrayList(4); - args.add(action.type.toString()); - args.add(action.valueType.toString()); + getJdbcTemplate().update( + "UPDATE action SET str_value=NULL,int_value=NULL,b_value=NULL,float_value=NULL WHERE pk_action=?", + action.getActionId()); - switch (action.valueType) { - case GROUP_TYPE: - query.append(",pk_folder=? 
WHERE pk_action=?"); - args.add(action.groupValue); - break; + StringBuilder query = new StringBuilder(1024); + query.append("UPDATE action SET str_action=?,str_value_type=?"); - case STRING_TYPE: - query.append(",str_value=? WHERE pk_action=?"); - args.add(action.stringValue); - break; + List args = new ArrayList(4); + args.add(action.type.toString()); + args.add(action.valueType.toString()); - case INTEGER_TYPE: - query.append(",int_value=? WHERE pk_action=?"); - args.add(action.intValue); - break; + switch (action.valueType) { + case GROUP_TYPE: + query.append(",pk_folder=? WHERE pk_action=?"); + args.add(action.groupValue); + break; - case FLOAT_TYPE: - query.append(",float_value=? WHERE pk_action=?"); - args.add(action.floatValue); - break; + case STRING_TYPE: + query.append(",str_value=? WHERE pk_action=?"); + args.add(action.stringValue); + break; - case BOOLEAN_TYPE: - query.append(",b_value=? WHERE pk_action=?"); - args.add(action.booleanValue); - break; + case INTEGER_TYPE: + query.append(",int_value=? WHERE pk_action=?"); + args.add(action.intValue); + break; - case NONE_TYPE: - query.append(" WHERE pk_action=?"); - break; + case FLOAT_TYPE: + query.append(",float_value=? WHERE pk_action=?"); + args.add(action.floatValue); + break; + + case BOOLEAN_TYPE: + query.append(",b_value=? WHERE pk_action=?"); + args.add(action.booleanValue); + break; + + case NONE_TYPE: + query.append(" WHERE pk_action=?"); + break; + + default: + throw new SpcueRuntimeException("invalid action value type: " + action.valueType); + } + + args.add(action.id); + getJdbcTemplate().update(query.toString(), args.toArray()); - default: - throw new SpcueRuntimeException("invalid action value type: " + action.valueType); } - args.add(action.id); - getJdbcTemplate().update(query.toString(), args.toArray()); - - } - - public void deleteAction(ActionInterface action) { - getJdbcTemplate().update("DELETE FROM action WHERE pk_action=?", action.getActionId()); - } - - public static final RowMapper ACTION_DETAIL_MAPPER = new RowMapper() { - public ActionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ActionEntity action = new ActionEntity(); - action.id = rs.getString("pk_action"); - action.showId = rs.getString("pk_show"); - action.filterId = rs.getString("pk_filter"); - action.booleanValue = rs.getBoolean("b_value"); - action.groupValue = rs.getString("pk_folder"); - action.intValue = rs.getLong("int_value"); - action.floatValue = rs.getFloat("float_value"); - action.type = ActionType.valueOf(rs.getString("str_action")); - action.valueType = ActionValueType.valueOf(rs.getString("str_value_type")); - action.stringValue = rs.getString("str_value"); - return action; + public void deleteAction(ActionInterface action) { + getJdbcTemplate().update("DELETE FROM action WHERE pk_action=?", action.getActionId()); } - }; + + public static final RowMapper ACTION_DETAIL_MAPPER = + new RowMapper() { + public ActionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ActionEntity action = new ActionEntity(); + action.id = rs.getString("pk_action"); + action.showId = rs.getString("pk_show"); + action.filterId = rs.getString("pk_filter"); + action.booleanValue = rs.getBoolean("b_value"); + action.groupValue = rs.getString("pk_folder"); + action.intValue = rs.getLong("int_value"); + action.floatValue = rs.getFloat("float_value"); + action.type = ActionType.valueOf(rs.getString("str_action")); + action.valueType = ActionValueType.valueOf(rs.getString("str_value_type")); + action.stringValue = 
rs.getString("str_value"); + return action; + } + }; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java index e1fd731bb..0ad5b5eaf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java @@ -39,143 +39,147 @@ public class AllocationDaoJdbc extends JdbcDaoSupport implements AllocationDao { - public static RowMapper ALLOC_MAPPER = new RowMapper() { - public AllocationEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - AllocationEntity alloc = new AllocationEntity(); - alloc.id = rs.getString("pk_alloc"); - alloc.facilityId = rs.getString("pk_facility"); - alloc.name = rs.getString("str_name"); - alloc.tag = rs.getString("str_tag"); - return alloc; - } - }; - - private static final String GET_ALLOCATION = "SELECT " + "alloc.pk_facility," + "alloc.pk_alloc, " - + "alloc.str_name, " + "alloc.str_tag, " + "facility.str_name AS facility_name " + "FROM " - + "alloc, " + "facility " + "WHERE " + "alloc.pk_facility = facility.pk_facility "; - - public AllocationEntity getAllocationEntity(String id) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND pk_alloc=?", ALLOC_MAPPER, id); - } - - public AllocationEntity findAllocationEntity(String facility, String name) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", ALLOC_MAPPER, - String.format("%s.%s", facility, name)); - } - - @Override - public AllocationEntity findAllocationEntity(String name) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", ALLOC_MAPPER, - name); - } - - private static final String INSERT_ALLOCATION = "INSERT INTO " + "alloc " + "(" + "pk_alloc," - + "pk_facility," + "str_name, " + "str_tag " + ") VALUES (?,?,?,?)"; - - public void insertAllocation(FacilityInterface facility, AllocationEntity detail) { - - String new_alloc_name = String.format("%s.%s", facility.getName(), detail.getName()); - /* - * Checks if the allocation already exits. 
- */ - if (getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM alloc WHERE str_name=?", - Integer.class, new_alloc_name) > 0) { - - getJdbcTemplate().update("UPDATE alloc SET b_enabled = true WHERE str_name=?", - new_alloc_name); - } else { - detail.id = SqlUtil.genKeyRandom(); - detail.name = new_alloc_name; - getJdbcTemplate().update(INSERT_ALLOCATION, detail.id, facility.getFacilityId(), detail.name, - detail.tag); + public static RowMapper ALLOC_MAPPER = new RowMapper() { + public AllocationEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + AllocationEntity alloc = new AllocationEntity(); + alloc.id = rs.getString("pk_alloc"); + alloc.facilityId = rs.getString("pk_facility"); + alloc.name = rs.getString("str_name"); + alloc.tag = rs.getString("str_tag"); + return alloc; + } + }; + + private static final String GET_ALLOCATION = "SELECT " + "alloc.pk_facility," + + "alloc.pk_alloc, " + "alloc.str_name, " + "alloc.str_tag, " + + "facility.str_name AS facility_name " + "FROM " + "alloc, " + "facility " + "WHERE " + + "alloc.pk_facility = facility.pk_facility "; + + public AllocationEntity getAllocationEntity(String id) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND pk_alloc=?", ALLOC_MAPPER, + id); } - } - public void deleteAllocation(AllocationInterface a) { - if (getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM host WHERE pk_alloc=?", - Integer.class, a.getAllocationId()) > 0) { - throw new EntityRemovalError("allocation still contains hosts", a); + public AllocationEntity findAllocationEntity(String facility, String name) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", + ALLOC_MAPPER, String.format("%s.%s", facility, name)); } - if (getJdbcTemplate().queryForObject("SELECT b_default FROM alloc WHERE pk_alloc=?", - Boolean.class, a.getAllocationId())) { - throw new EntityRemovalError("you cannot delete the default allocation", a); + @Override + public AllocationEntity findAllocationEntity(String name) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", + ALLOC_MAPPER, name); } - Savepoint sp1; - try { - sp1 = getConnection().setSavepoint(); - } catch (SQLException e) { - throw new RuntimeException("failed to create savepoint", e); + private static final String INSERT_ALLOCATION = "INSERT INTO " + "alloc " + "(" + "pk_alloc," + + "pk_facility," + "str_name, " + "str_tag " + ") VALUES (?,?,?,?)"; + + public void insertAllocation(FacilityInterface facility, AllocationEntity detail) { + + String new_alloc_name = String.format("%s.%s", facility.getName(), detail.getName()); + /* + * Checks if the allocation already exits. + */ + if (getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM alloc WHERE str_name=?", + Integer.class, new_alloc_name) > 0) { + + getJdbcTemplate().update("UPDATE alloc SET b_enabled = true WHERE str_name=?", + new_alloc_name); + } else { + detail.id = SqlUtil.genKeyRandom(); + detail.name = new_alloc_name; + getJdbcTemplate().update(INSERT_ALLOCATION, detail.id, facility.getFacilityId(), + detail.name, detail.tag); + } } - /* - * Allocations are logged in historical data so once they are used you can't specifically delete - * them. They are disabled instead. 
- */ - try { - getJdbcTemplate().update("DELETE FROM alloc WHERE pk_alloc=?", a.getAllocationId()); - } catch (DataIntegrityViolationException e) { - try { - getConnection().rollback(sp1); - } catch (SQLException e1) { - throw new RuntimeException("failed to roll back failed delete", e); - } - getJdbcTemplate().update("UPDATE alloc SET b_enabled = false WHERE pk_alloc = ?", - a.getAllocationId()); + public void deleteAllocation(AllocationInterface a) { + if (getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM host WHERE pk_alloc=?", + Integer.class, a.getAllocationId()) > 0) { + throw new EntityRemovalError("allocation still contains hosts", a); + } + + if (getJdbcTemplate().queryForObject("SELECT b_default FROM alloc WHERE pk_alloc=?", + Boolean.class, a.getAllocationId())) { + throw new EntityRemovalError("you cannot delete the default allocation", a); + } + + Savepoint sp1; + try { + sp1 = getConnection().setSavepoint(); + } catch (SQLException e) { + throw new RuntimeException("failed to create savepoint", e); + } + + /* + * Allocations are logged in historical data so once they are used you can't specifically + * delete them. They are disabled instead. + */ + try { + getJdbcTemplate().update("DELETE FROM alloc WHERE pk_alloc=?", a.getAllocationId()); + } catch (DataIntegrityViolationException e) { + try { + getConnection().rollback(sp1); + } catch (SQLException e1) { + throw new RuntimeException("failed to roll back failed delete", e); + } + getJdbcTemplate().update("UPDATE alloc SET b_enabled = false WHERE pk_alloc = ?", + a.getAllocationId()); + } } - } - public void updateAllocationName(AllocationInterface a, String name) { - if (!Pattern.matches("^\\w+$", name)) { - throw new IllegalArgumentException( - "The new allocation name" + "must be alpha numeric and not contain the facility prefix."); + public void updateAllocationName(AllocationInterface a, String name) { + if (!Pattern.matches("^\\w+$", name)) { + throw new IllegalArgumentException("The new allocation name" + + "must be alpha numeric and not contain the facility prefix."); + } + + String[] parts = a.getName().split("\\.", 2); + String new_name = String.format("%s.%s", parts[0], name); + + getJdbcTemplate().update("UPDATE alloc SET str_name=? WHERE pk_alloc=?", new_name, + a.getAllocationId()); } - String[] parts = a.getName().split("\\.", 2); - String new_name = String.format("%s.%s", parts[0], name); - - getJdbcTemplate().update("UPDATE alloc SET str_name=? WHERE pk_alloc=?", new_name, - a.getAllocationId()); - } - - public void updateAllocationTag(AllocationInterface a, String tag) { - getJdbcTemplate().update("UPDATE alloc SET str_tag=? WHERE pk_alloc=?", tag, - a.getAllocationId()); - - getJdbcTemplate().update( - "UPDATE host_tag SET str_tag=? WHERE " + "host_tag.str_tag_type='Alloc' AND pk_host IN " - + "(SELECT pk_host FROM host WHERE host.pk_alloc=?)", - tag, a.getAllocationId()); - - for (Map e : getJdbcTemplate() - .queryForList("SELECT pk_host FROM host WHERE pk_alloc=?", a.getAllocationId())) { - final String pk_host = (String) e.get("pk_host"); - getJdbcTemplate().call(new CallableStatementCreator() { - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recalculate_tags(?) }"); - c.setString(1, pk_host); - return c; + public void updateAllocationTag(AllocationInterface a, String tag) { + getJdbcTemplate().update("UPDATE alloc SET str_tag=? 
WHERE pk_alloc=?", tag, + a.getAllocationId()); + + getJdbcTemplate().update( + "UPDATE host_tag SET str_tag=? WHERE " + + "host_tag.str_tag_type='Alloc' AND pk_host IN " + + "(SELECT pk_host FROM host WHERE host.pk_alloc=?)", + tag, a.getAllocationId()); + + for (Map e : getJdbcTemplate() + .queryForList("SELECT pk_host FROM host WHERE pk_alloc=?", a.getAllocationId())) { + final String pk_host = (String) e.get("pk_host"); + getJdbcTemplate().call(new CallableStatementCreator() { + public CallableStatement createCallableStatement(Connection con) + throws SQLException { + CallableStatement c = con.prepareCall("{ call recalculate_tags(?) }"); + c.setString(1, pk_host); + return c; + } + }, new ArrayList()); } - }, new ArrayList()); } - } - public void setDefaultAllocation(AllocationInterface a) { - getJdbcTemplate().update("UPDATE alloc SET b_default = false WHERE b_default = true"); - getJdbcTemplate().update("UPDATE alloc SET b_default = true WHERE pk_alloc=?", - a.getAllocationId()); - } + public void setDefaultAllocation(AllocationInterface a) { + getJdbcTemplate().update("UPDATE alloc SET b_default = false WHERE b_default = true"); + getJdbcTemplate().update("UPDATE alloc SET b_default = true WHERE pk_alloc=?", + a.getAllocationId()); + } - public AllocationEntity getDefaultAllocationEntity() { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.b_default = true LIMIT 1", - ALLOC_MAPPER); - } + public AllocationEntity getDefaultAllocationEntity() { + return getJdbcTemplate().queryForObject( + GET_ALLOCATION + " AND alloc.b_default = true LIMIT 1", ALLOC_MAPPER); + } - @Override - public void updateAllocationBillable(AllocationInterface alloc, boolean value) { - getJdbcTemplate().update("UPDATE alloc SET b_billable = ? WHERE pk_alloc = ?", value, - alloc.getAllocationId()); + @Override + public void updateAllocationBillable(AllocationInterface alloc, boolean value) { + getJdbcTemplate().update("UPDATE alloc SET b_billable = ? 
WHERE pk_alloc = ?", value, + alloc.getAllocationId()); - } + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java index f2b85cba5..186e22ede 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java @@ -36,282 +36,283 @@ public class BookingDaoJdbc extends JdbcDaoSupport implements BookingDao { - private static final String INSERT_LOCAL_JOB_ASSIGNMENT = "INSERT INTO " + "host_local " + "(" - + "pk_host_local," + "pk_job," + "pk_layer," + "pk_frame," + "str_type," + "pk_host," - + "int_mem_max," + "int_mem_idle," + "int_cores_max," + "int_cores_idle," - + "int_gpu_mem_idle," + "int_gpu_mem_max," + "int_gpus_max," + "int_gpus_idle," - + "int_threads " + ") " + "VALUES " + "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), job.getName()); - l.setHostId(h.getHostId()); - l.setJobId(job.getJobId()); - l.setType(RenderPartitionType.JOB_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpuUnits(l.getMaxGpuUnits()); - l.setIdleGpuMemory(l.getMaxGpuMemory()); - - getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, job.getJobId(), l.getLayerId(), - l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), l.getMaxMemory(), - l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), l.getMaxGpuMemory(), - l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); - } - - @Override - public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, - LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), layer.getName()); - l.setHostId(h.getHostId()); - l.setJobId(layer.getJobId()); - l.setLayerId(layer.getLayerId()); - l.setType(RenderPartitionType.LAYER_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpuUnits(l.getMaxGpuUnits()); - l.setIdleGpuMemory(l.getMaxGpuMemory()); - - getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, l.getJobId(), l.getLayerId(), - l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), l.getMaxMemory(), - l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), l.getMaxGpuMemory(), - l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); - } - - @Override - public void insertLocalHostAssignment(HostInterface h, FrameInterface frame, - LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), frame.getName()); - l.setHostId(h.getHostId()); - l.setJobId(frame.getJobId()); - l.setLayerId(frame.getLayerId()); - l.setFrameId(frame.getFrameId()); - l.setType(RenderPartitionType.FRAME_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpuUnits(l.getMaxGpuUnits()); - l.setIdleGpuMemory(l.getMaxGpuMemory()); - - getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, l.getJobId(), l.getLayerId(), - l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), l.getMaxMemory(), - l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), l.getMaxGpuMemory(), - l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); - } - - public static 
final RowMapper LJA_MAPPER = - new RowMapper() { - public LocalHostAssignment mapRow(final ResultSet rs, int rowNum) throws SQLException { - LocalHostAssignment l = new LocalHostAssignment(); - l.id = rs.getString("pk_host_local"); - l.setMaxCoreUnits(rs.getInt("int_cores_max")); - l.setMaxMemory(rs.getLong("int_mem_max")); - l.setMaxGpuUnits(rs.getInt("int_gpus_max")); - l.setMaxGpuMemory(rs.getLong("int_gpu_mem_max")); - l.setThreads(rs.getInt("int_threads")); - l.setIdleCoreUnits(rs.getInt("int_cores_idle")); - l.setIdleMemory(rs.getLong("int_mem_idle")); - l.setIdleGpuUnits(rs.getInt("int_gpus_idle")); - l.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); - l.setJobId(rs.getString("pk_job")); - l.setLayerId(rs.getString("pk_layer")); - l.setFrameId(rs.getString("pk_frame")); - l.setHostId(rs.getString("pk_host")); - l.setType(RenderPartitionType.valueOf(rs.getString("str_type"))); - return l; + private static final String INSERT_LOCAL_JOB_ASSIGNMENT = "INSERT INTO " + "host_local " + "(" + + "pk_host_local," + "pk_job," + "pk_layer," + "pk_frame," + "str_type," + "pk_host," + + "int_mem_max," + "int_mem_idle," + "int_cores_max," + "int_cores_idle," + + "int_gpu_mem_idle," + "int_gpu_mem_max," + "int_gpus_max," + "int_gpus_idle," + + "int_threads " + ") " + "VALUES " + "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertLocalHostAssignment(HostInterface h, JobInterface job, + LocalHostAssignment l) { + l.id = SqlUtil.genKeyRandom(); + l.name = String.format("%s->%s", h.getName(), job.getName()); + l.setHostId(h.getHostId()); + l.setJobId(job.getJobId()); + l.setType(RenderPartitionType.JOB_PARTITION); + l.setIdleCoreUnits(l.getMaxCoreUnits()); + l.setIdleMemory(l.getMaxMemory()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); + + getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, job.getJobId(), l.getLayerId(), + l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), + l.getMaxMemory(), l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), + l.getMaxGpuMemory(), l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); + } + + @Override + public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, + LocalHostAssignment l) { + l.id = SqlUtil.genKeyRandom(); + l.name = String.format("%s->%s", h.getName(), layer.getName()); + l.setHostId(h.getHostId()); + l.setJobId(layer.getJobId()); + l.setLayerId(layer.getLayerId()); + l.setType(RenderPartitionType.LAYER_PARTITION); + l.setIdleCoreUnits(l.getMaxCoreUnits()); + l.setIdleMemory(l.getMaxMemory()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); + + getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, l.getJobId(), l.getLayerId(), + l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), + l.getMaxMemory(), l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), + l.getMaxGpuMemory(), l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); + } + + @Override + public void insertLocalHostAssignment(HostInterface h, FrameInterface frame, + LocalHostAssignment l) { + l.id = SqlUtil.genKeyRandom(); + l.name = String.format("%s->%s", h.getName(), frame.getName()); + l.setHostId(h.getHostId()); + l.setJobId(frame.getJobId()); + l.setLayerId(frame.getLayerId()); + l.setFrameId(frame.getFrameId()); + l.setType(RenderPartitionType.FRAME_PARTITION); + l.setIdleCoreUnits(l.getMaxCoreUnits()); + l.setIdleMemory(l.getMaxMemory()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + 
l.setIdleGpuMemory(l.getMaxGpuMemory()); + + getJdbcTemplate().update(INSERT_LOCAL_JOB_ASSIGNMENT, l.id, l.getJobId(), l.getLayerId(), + l.getFrameId(), l.getType().toString(), h.getHostId(), l.getMaxMemory(), + l.getMaxMemory(), l.getMaxCoreUnits(), l.getMaxCoreUnits(), l.getMaxGpuMemory(), + l.getMaxGpuMemory(), l.getMaxGpuUnits(), l.getMaxGpuUnits(), l.getThreads()); + } + + public static final RowMapper LJA_MAPPER = + new RowMapper() { + public LocalHostAssignment mapRow(final ResultSet rs, int rowNum) + throws SQLException { + LocalHostAssignment l = new LocalHostAssignment(); + l.id = rs.getString("pk_host_local"); + l.setMaxCoreUnits(rs.getInt("int_cores_max")); + l.setMaxMemory(rs.getLong("int_mem_max")); + l.setMaxGpuUnits(rs.getInt("int_gpus_max")); + l.setMaxGpuMemory(rs.getLong("int_gpu_mem_max")); + l.setThreads(rs.getInt("int_threads")); + l.setIdleCoreUnits(rs.getInt("int_cores_idle")); + l.setIdleMemory(rs.getLong("int_mem_idle")); + l.setIdleGpuUnits(rs.getInt("int_gpus_idle")); + l.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); + l.setJobId(rs.getString("pk_job")); + l.setLayerId(rs.getString("pk_layer")); + l.setFrameId(rs.getString("pk_frame")); + l.setHostId(rs.getString("pk_host")); + l.setType(RenderPartitionType.valueOf(rs.getString("str_type"))); + return l; + } + }; + + private static final String QUERY_FOR_LJA = + "SELECT " + "pk_host_local," + "pk_job," + "pk_layer," + "pk_frame," + "pk_host," + + "int_mem_idle," + "int_mem_max," + "int_cores_idle," + "int_cores_max," + + "int_gpu_mem_idle," + "int_gpu_mem_max," + "int_gpus_idle," + "int_gpus_max," + + "int_threads, " + "str_type " + "FROM " + "host_local "; + + @Override + public List getLocalJobAssignment(HostInterface host) { + return getJdbcTemplate().query(QUERY_FOR_LJA + "WHERE " + "host_local.pk_host = ? ", + LJA_MAPPER, host.getHostId()); + } + + @Override + public LocalHostAssignment getLocalJobAssignment(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_LJA + " WHERE pk_host_local = ?", + LJA_MAPPER, id); + } + + @Override + public LocalHostAssignment getLocalJobAssignment(String hostId, String jobId) { + return getJdbcTemplate().queryForObject(QUERY_FOR_LJA + " WHERE pk_host = ? and pk_job = ?", + LJA_MAPPER, hostId, jobId); + } + + @Override + public boolean deleteLocalJobAssignment(LocalHostAssignment l) { + return getJdbcTemplate().update("DELETE FROM host_local WHERE pk_host_local = ?", + l.getId()) > 0; + } + + private static final String HAS_LOCAL_JOB = "SELECT " + "COUNT(1) " + "FROM " + "host_local " + + "WHERE " + "host_local.pk_host = ? "; + + @Override + public boolean hasLocalJob(HostInterface host) { + return getJdbcTemplate().queryForObject(HAS_LOCAL_JOB, Integer.class, host.getHostId()) > 0; + } + + private static final String HAS_ACTIVE_LOCAL_JOB = "SELECT " + "COUNT(1) " + "FROM " + + "host_local, " + "proc " + "WHERE " + "host_local.pk_host = proc.pk_host " + "AND " + + "proc.b_local = true " + "AND " + "host_local.pk_host = ? "; + + @Override + public boolean hasActiveLocalJob(HostInterface host) { + return getJdbcTemplate().queryForObject(HAS_ACTIVE_LOCAL_JOB, Integer.class, + host.getHostId()) > 0; + } + + @Override + public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { + return getJdbcTemplate().queryForObject( + "SELECT ? 
- int_cores_max FROM host_local WHERE pk_host_local=?", Integer.class, + coreUnits, l.getId()); + } + + @Override + public int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits) { + return getJdbcTemplate().queryForObject( + "SELECT ? - int_gpus_max FROM host_local WHERE pk_host_local=?", Integer.class, + gpuUnits, l.getId()); + } + + private static final String UPDATE_MAX_CORES = "UPDATE " + "host_local " + "SET " + + "int_cores_idle = int_cores_idle + (? - int_cores_max), " + "int_cores_max = ? " + + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxCores(LocalHostAssignment l, int coreUnits) { + return getJdbcTemplate().update(UPDATE_MAX_CORES, coreUnits, coreUnits, l.getId()) > 0; + } + + private static final String UPDATE_MAX_GPUS = "UPDATE " + "host_local " + "SET " + + "int_gpus_idle = int_gpus_idle + (? - int_gpus_max), " + "int_gpus_max = ? " + + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxGpus(LocalHostAssignment l, int gpuUnits) { + return getJdbcTemplate().update(UPDATE_MAX_GPUS, gpuUnits, gpuUnits, l.getId()) > 0; + } + + private static final String UPDATE_MAX_MEMORY = + "UPDATE " + "host_local " + "SET " + "int_mem_idle = int_mem_idle + (? - int_mem_max), " + + "int_mem_max = ? " + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxMemory(LocalHostAssignment l, long maxMemory) { + return getJdbcTemplate().update(UPDATE_MAX_MEMORY, maxMemory, maxMemory, l.getId()) > 0; + } + + private static final String UPDATE_MAX_GPU_MEMORY = "UPDATE " + "host_local " + "SET " + + "int_gpu_mem_idle = int_gpu_mem_idle + (? - int_gpu_mem_max), " + + "int_gpu_mem_max = ? " + "WHERE " + "pk_host_local = ? "; + + @Override + public boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory) { + return getJdbcTemplate().update(UPDATE_MAX_GPU_MEMORY, maxGpuMemory, maxGpuMemory, + l.getId()) > 0; + } + + @Override + public boolean deactivate(LocalHostAssignment l) { + return getJdbcTemplate().update("UPDATE host_local SET b_active = false WHERE " + + "pk_host_local = ? AND b_active = true", l.getId()) > 0; + } + + /** + * + * @param h HostInterface + * @param cores int + * @return boolean + */ + @Override + public boolean allocateCoresFromHost(HostInterface h, int cores) { + + try { + return getJdbcTemplate().update( + "UPDATE host SET int_cores_idle = int_cores_idle - ? " + "WHERE pk_host = ?", + cores, h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to allocate " + cores + " from host, " + e); } - }; - - private static final String QUERY_FOR_LJA = "SELECT " + "pk_host_local," + "pk_job," + "pk_layer," - + "pk_frame," + "pk_host," + "int_mem_idle," + "int_mem_max," + "int_cores_idle," - + "int_cores_max," + "int_gpu_mem_idle," + "int_gpu_mem_max," + "int_gpus_idle," - + "int_gpus_max," + "int_threads, " + "str_type " + "FROM " + "host_local "; - - @Override - public List getLocalJobAssignment(HostInterface host) { - return getJdbcTemplate().query(QUERY_FOR_LJA + "WHERE " + "host_local.pk_host = ? ", LJA_MAPPER, - host.getHostId()); - } - - @Override - public LocalHostAssignment getLocalJobAssignment(String id) { - return getJdbcTemplate().queryForObject(QUERY_FOR_LJA + " WHERE pk_host_local = ?", LJA_MAPPER, - id); - } - - @Override - public LocalHostAssignment getLocalJobAssignment(String hostId, String jobId) { - return getJdbcTemplate().queryForObject(QUERY_FOR_LJA + " WHERE pk_host = ? 
and pk_job = ?", - LJA_MAPPER, hostId, jobId); - } - - @Override - public boolean deleteLocalJobAssignment(LocalHostAssignment l) { - return getJdbcTemplate().update("DELETE FROM host_local WHERE pk_host_local = ?", - l.getId()) > 0; - } - - private static final String HAS_LOCAL_JOB = - "SELECT " + "COUNT(1) " + "FROM " + "host_local " + "WHERE " + "host_local.pk_host = ? "; - - @Override - public boolean hasLocalJob(HostInterface host) { - return getJdbcTemplate().queryForObject(HAS_LOCAL_JOB, Integer.class, host.getHostId()) > 0; - } - - private static final String HAS_ACTIVE_LOCAL_JOB = "SELECT " + "COUNT(1) " + "FROM " - + "host_local, " + "proc " + "WHERE " + "host_local.pk_host = proc.pk_host " + "AND " - + "proc.b_local = true " + "AND " + "host_local.pk_host = ? "; - - @Override - public boolean hasActiveLocalJob(HostInterface host) { - return getJdbcTemplate().queryForObject(HAS_ACTIVE_LOCAL_JOB, Integer.class, - host.getHostId()) > 0; - } - - @Override - public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { - return getJdbcTemplate().queryForObject( - "SELECT ? - int_cores_max FROM host_local WHERE pk_host_local=?", Integer.class, coreUnits, - l.getId()); - } - - @Override - public int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits) { - return getJdbcTemplate().queryForObject( - "SELECT ? - int_gpus_max FROM host_local WHERE pk_host_local=?", Integer.class, gpuUnits, - l.getId()); - } - - private static final String UPDATE_MAX_CORES = - "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle + (? - int_cores_max), " - + "int_cores_max = ? " + "WHERE " + "pk_host_local = ? "; - - @Override - public boolean updateMaxCores(LocalHostAssignment l, int coreUnits) { - return getJdbcTemplate().update(UPDATE_MAX_CORES, coreUnits, coreUnits, l.getId()) > 0; - } - - private static final String UPDATE_MAX_GPUS = - "UPDATE " + "host_local " + "SET " + "int_gpus_idle = int_gpus_idle + (? - int_gpus_max), " - + "int_gpus_max = ? " + "WHERE " + "pk_host_local = ? "; - - @Override - public boolean updateMaxGpus(LocalHostAssignment l, int gpuUnits) { - return getJdbcTemplate().update(UPDATE_MAX_GPUS, gpuUnits, gpuUnits, l.getId()) > 0; - } - - private static final String UPDATE_MAX_MEMORY = - "UPDATE " + "host_local " + "SET " + "int_mem_idle = int_mem_idle + (? - int_mem_max), " - + "int_mem_max = ? " + "WHERE " + "pk_host_local = ? "; - - @Override - public boolean updateMaxMemory(LocalHostAssignment l, long maxMemory) { - return getJdbcTemplate().update(UPDATE_MAX_MEMORY, maxMemory, maxMemory, l.getId()) > 0; - } - - private static final String UPDATE_MAX_GPU_MEMORY = "UPDATE " + "host_local " + "SET " - + "int_gpu_mem_idle = int_gpu_mem_idle + (? - int_gpu_mem_max), " + "int_gpu_mem_max = ? " - + "WHERE " + "pk_host_local = ? "; - - @Override - public boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory) { - return getJdbcTemplate().update(UPDATE_MAX_GPU_MEMORY, maxGpuMemory, maxGpuMemory, - l.getId()) > 0; - } - - @Override - public boolean deactivate(LocalHostAssignment l) { - return getJdbcTemplate().update( - "UPDATE host_local SET b_active = false WHERE " + "pk_host_local = ? AND b_active = true", - l.getId()) > 0; - } - - /** - * - * @param h HostInterface - * @param cores int - * @return boolean - */ - @Override - public boolean allocateCoresFromHost(HostInterface h, int cores) { - - try { - return getJdbcTemplate().update( - "UPDATE host SET int_cores_idle = int_cores_idle - ? 
" + "WHERE pk_host = ?", cores, - h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException( - "Failed to allocate " + cores + " from host, " + e); + } - } - - /** - * - * @param h HostInterface - * @param gpus int - * @return boolean - */ - @Override - public boolean allocateGpusFromHost(HostInterface h, int gpus) { - - try { - return getJdbcTemplate().update( - "UPDATE host SET int_gpus_idle = int_gpus_idle - ? " + "WHERE pk_host = ?", gpus, - h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException( - "Failed to allocate " + gpus + " GPU from host, " + e); + /** + * + * @param h HostInterface + * @param gpus int + * @return boolean + */ + @Override + public boolean allocateGpusFromHost(HostInterface h, int gpus) { + + try { + return getJdbcTemplate().update( + "UPDATE host SET int_gpus_idle = int_gpus_idle - ? " + "WHERE pk_host = ?", + gpus, h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to allocate " + gpus + " GPU from host, " + e); + } } - } - - /** - * - * @param h HostInterface - * @param cores int - * @return boolean - */ - @Override - public boolean deallocateCoresFromHost(HostInterface h, int cores) { - try { - return getJdbcTemplate().update( - "UPDATE host SET int_cores_idle = int_cores_idle + ? WHERE pk_host = ?", cores, - h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException( - "Failed to de-allocate " + cores + " from host, " + e); + + /** + * + * @param h HostInterface + * @param cores int + * @return boolean + */ + @Override + public boolean deallocateCoresFromHost(HostInterface h, int cores) { + try { + return getJdbcTemplate().update( + "UPDATE host SET int_cores_idle = int_cores_idle + ? WHERE pk_host = ?", cores, + h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to de-allocate " + cores + " from host, " + e); + } } - } - - /** - * - * @param h HostInterface - * @param gpus int - * @return boolean - */ - @Override - public boolean deallocateGpusFromHost(HostInterface h, int gpus) { - try { - return getJdbcTemplate().update( - "UPDATE host SET int_gpus_idle = int_gpus_idle + ? WHERE pk_host = ?", gpus, - h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException( - "Failed to de-allocate " + gpus + " GPU from host, " + e); + + /** + * + * @param h HostInterface + * @param gpus int + * @return boolean + */ + @Override + public boolean deallocateGpusFromHost(HostInterface h, int gpus) { + try { + return getJdbcTemplate().update( + "UPDATE host SET int_gpus_idle = int_gpus_idle + ? 
WHERE pk_host = ?", gpus, + h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException( + "Failed to de-allocate " + gpus + " GPU from host, " + e); + } + } + + @Override + public boolean hasResourceDeficit(HostInterface host) { + return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM host_local WHERE " + + "(int_cores_max < int_cores_max - int_cores_idle OR " + + "int_gpus_max < int_gpus_max - int_gpus_idle OR " + + "int_gpu_mem_max < int_gpu_mem_max - int_gpu_mem_idle OR " + + "int_mem_max < int_mem_max - int_mem_idle) AND " + "host_local.pk_host= ?", + Integer.class, host.getHostId()) > 0; } - } - - @Override - public boolean hasResourceDeficit(HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host_local WHERE " - + "(int_cores_max < int_cores_max - int_cores_idle OR " - + "int_gpus_max < int_gpus_max - int_gpus_idle OR " - + "int_gpu_mem_max < int_gpu_mem_max - int_gpu_mem_idle OR " - + "int_mem_max < int_mem_max - int_mem_idle) AND " + "host_local.pk_host= ?", - Integer.class, host.getHostId()) > 0; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java index 9362981c1..eae724867 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java @@ -31,94 +31,98 @@ public class CommentDaoJdbc extends JdbcDaoSupport implements CommentDao { - public void deleteComment(String id) { - /* - * Checks what type of comment we have. - */ - Map type = getJdbcTemplate() - .queryForMap("SELECT pk_job, pk_host FROM comments WHERE pk_comment=?", id); - - /* - * If the comment is deleted successfully, check if we need to unset the b_comment boolean flag. - */ - if (getJdbcTemplate().update("DELETE FROM comments WHERE pk_comment=?", id) > 0) { - if (type.get("pk_job") != null) { - getJdbcTemplate().update( - "UPDATE job SET b_comment=false WHERE job.pk_job = ? AND " - + "(SELECT COUNT(1) FROM comments c WHERE c.pk_job = job.pk_job) = 0", - type.get("pk_job")); - } else if (type.get("pk_host") != null) { + public void deleteComment(String id) { + /* + * Checks what type of comment we have. + */ + Map type = getJdbcTemplate() + .queryForMap("SELECT pk_job, pk_host FROM comments WHERE pk_comment=?", id); + + /* + * If the comment is deleted successfully, check if we need to unset the b_comment boolean + * flag. + */ + if (getJdbcTemplate().update("DELETE FROM comments WHERE pk_comment=?", id) > 0) { + if (type.get("pk_job") != null) { + getJdbcTemplate().update("UPDATE job SET b_comment=false WHERE job.pk_job = ? AND " + + "(SELECT COUNT(1) FROM comments c WHERE c.pk_job = job.pk_job) = 0", + type.get("pk_job")); + } else if (type.get("pk_host") != null) { + getJdbcTemplate().update( + "UPDATE host SET b_comment=false WHERE host.pk_host = ? 
AND " + + "(SELECT COUNT(1) FROM comments c WHERE c.pk_host = host.pk_host) = 0", + type.get("pk_host")); + } + } + } + + private static final RowMapper COMMENT_DETAIL_MAPPER = + new RowMapper() { + public CommentDetail mapRow(ResultSet rs, int row) throws SQLException { + CommentDetail d = new CommentDetail(); + d.id = rs.getString("pk_comment"); + d.message = rs.getString("str_message"); + d.subject = rs.getString("str_subject"); + d.timestamp = rs.getTimestamp("ts_created"); + d.user = rs.getString("str_user"); + return d; + } + }; + + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, + String subject) { + return getJdbcTemplate().update( + "DELETE FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", + host.getHostId(), user, subject) > 0; + } + + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject) { + return getJdbcTemplate().query( + "SELECT * FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", + COMMENT_DETAIL_MAPPER, host.getHostId(), user, subject); + } + + public CommentDetail getCommentDetail(String id) { + return getJdbcTemplate().queryForObject("SELECT * FROM comments WHERE pk_comment=?", + COMMENT_DETAIL_MAPPER, id); + } + + public void updateComment(CommentDetail comment) { getJdbcTemplate().update( - "UPDATE host SET b_comment=false WHERE host.pk_host = ? AND " - + "(SELECT COUNT(1) FROM comments c WHERE c.pk_host = host.pk_host) = 0", - type.get("pk_host")); - } + "UPDATE comments SET str_message=?,str_subject=? WHERE pk_comment=?", + comment.message, comment.subject, comment.id); + } + + public void updateCommentMessage(String id, String message) { + getJdbcTemplate().update("UPDATE comments SET str_message=? WHERE pk_comment=?", message, + id); + } + + public void updateCommentSubject(String id, String subject) { + getJdbcTemplate().update("UPDATE comments SET str_subject=? 
WHERE pk_comment=?", subject, + id); + } + + private static final String INSERT_JOB_COMMENT = "INSERT INTO " + "comments " + "(" + + "pk_comment,pk_job,str_user,str_subject,str_message" + ") VALUES (?,?,?,?,?)"; + + public void insertComment(JobInterface job, CommentDetail comment) { + comment.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_JOB_COMMENT, comment.id, job.getJobId(), comment.user, + comment.subject, comment.message); + getJdbcTemplate().update("UPDATE job SET b_comment=true WHERE pk_job=?", job.getJobId()); + } + + private static final String INSERT_HOST_COMMENT = "INSERT INTO " + "comments " + "(" + + "pk_comment,pk_host,str_user,str_subject,str_message" + ") VALUES (?,?,?,?,?)"; + + public void insertComment(HostInterface host, CommentDetail comment) { + comment.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_HOST_COMMENT, comment.id, host.getHostId(), comment.user, + comment.subject, comment.message); + getJdbcTemplate().update("UPDATE host SET b_comment=true WHERE pk_host=?", + host.getHostId()); } - } - - private static final RowMapper COMMENT_DETAIL_MAPPER = - new RowMapper() { - public CommentDetail mapRow(ResultSet rs, int row) throws SQLException { - CommentDetail d = new CommentDetail(); - d.id = rs.getString("pk_comment"); - d.message = rs.getString("str_message"); - d.subject = rs.getString("str_subject"); - d.timestamp = rs.getTimestamp("ts_created"); - d.user = rs.getString("str_user"); - return d; - } - }; - - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, - String subject) { - return getJdbcTemplate().update( - "DELETE FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", host.getHostId(), - user, subject) > 0; - } - - public List getCommentsByHostUserAndSubject(HostInterface host, String user, - String subject) { - return getJdbcTemplate().query( - "SELECT * FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", - COMMENT_DETAIL_MAPPER, host.getHostId(), user, subject); - } - - public CommentDetail getCommentDetail(String id) { - return getJdbcTemplate().queryForObject("SELECT * FROM comments WHERE pk_comment=?", - COMMENT_DETAIL_MAPPER, id); - } - - public void updateComment(CommentDetail comment) { - getJdbcTemplate().update("UPDATE comments SET str_message=?,str_subject=? WHERE pk_comment=?", - comment.message, comment.subject, comment.id); - } - - public void updateCommentMessage(String id, String message) { - getJdbcTemplate().update("UPDATE comments SET str_message=? WHERE pk_comment=?", message, id); - } - - public void updateCommentSubject(String id, String subject) { - getJdbcTemplate().update("UPDATE comments SET str_subject=? 
WHERE pk_comment=?", subject, id); - } - - private static final String INSERT_JOB_COMMENT = "INSERT INTO " + "comments " + "(" - + "pk_comment,pk_job,str_user,str_subject,str_message" + ") VALUES (?,?,?,?,?)"; - - public void insertComment(JobInterface job, CommentDetail comment) { - comment.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_COMMENT, comment.id, job.getJobId(), comment.user, - comment.subject, comment.message); - getJdbcTemplate().update("UPDATE job SET b_comment=true WHERE pk_job=?", job.getJobId()); - } - - private static final String INSERT_HOST_COMMENT = "INSERT INTO " + "comments " + "(" - + "pk_comment,pk_host,str_user,str_subject,str_message" + ") VALUES (?,?,?,?,?)"; - - public void insertComment(HostInterface host, CommentDetail comment) { - comment.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_HOST_COMMENT, comment.id, host.getHostId(), comment.user, - comment.subject, comment.message); - getJdbcTemplate().update("UPDATE host SET b_comment=true WHERE pk_host=?", host.getHostId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java index bdffd1694..ff4552917 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java @@ -30,58 +30,59 @@ public class DeedDaoJdbc extends JdbcDaoSupport implements DeedDao { - public static final RowMapper DEED_MAPPER = new RowMapper() { - public DeedEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - DeedEntity o = new DeedEntity(); - o.id = rs.getString("pk_deed"); - o.owner = rs.getString("str_username"); - o.host = rs.getString("str_hostname"); - return o; + public static final RowMapper DEED_MAPPER = new RowMapper() { + public DeedEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + DeedEntity o = new DeedEntity(); + o.id = rs.getString("pk_deed"); + o.owner = rs.getString("str_username"); + o.host = rs.getString("str_hostname"); + return o; + } + }; + + @Override + public boolean deleteDeed(DeedEntity deed) { + return getJdbcTemplate().update("DELETE FROM deed WHERE pk_deed = ?", deed.getId()) > 0; + } + + @Override + public boolean deleteDeed(HostInterface host) { + return getJdbcTemplate().update("DELETE FROM deed WHERE pk_host = ?", host.getHostId()) > 0; + } + + @Override + public void deleteDeeds(OwnerEntity owner) { + getJdbcTemplate().update("DELETE FROM deed WHERE pk_owner = ?", owner.getId()); + } + + private static final String INSERT_DEED = "INSERT INTO " + "deed " + "(" + "pk_deed," + + "pk_owner," + "pk_host " + ") " + "VALUES (?,?,?)"; + + public DeedEntity insertDeed(OwnerEntity owner, HostInterface host) { + DeedEntity deed = new DeedEntity(); + deed.id = SqlUtil.genKeyRandom(); + deed.host = host.getName(); + deed.owner = owner.name; + + getJdbcTemplate().update(INSERT_DEED, deed.getId(), owner.getId(), host.getId()); + + return deed; + } + + private static final String QUERY_FOR_DEED = + "SELECT " + "deed.pk_deed, " + "host.str_name as str_hostname, " + "owner.str_username " + + "FROM " + "deed," + "host," + "owner " + "WHERE " + + "deed.pk_owner = owner.pk_owner " + "AND " + "deed.pk_host = host.pk_host "; + + @Override + public DeedEntity getDeed(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_DEED + " AND pk_deed = ?", DEED_MAPPER, + id); + } + + @Override + public List getDeeds(OwnerEntity owner) { + return 
getJdbcTemplate().query(QUERY_FOR_DEED + " AND owner.pk_owner = ?", DEED_MAPPER, + owner.getId()); } - }; - - @Override - public boolean deleteDeed(DeedEntity deed) { - return getJdbcTemplate().update("DELETE FROM deed WHERE pk_deed = ?", deed.getId()) > 0; - } - - @Override - public boolean deleteDeed(HostInterface host) { - return getJdbcTemplate().update("DELETE FROM deed WHERE pk_host = ?", host.getHostId()) > 0; - } - - @Override - public void deleteDeeds(OwnerEntity owner) { - getJdbcTemplate().update("DELETE FROM deed WHERE pk_owner = ?", owner.getId()); - } - - private static final String INSERT_DEED = "INSERT INTO " + "deed " + "(" + "pk_deed," - + "pk_owner," + "pk_host " + ") " + "VALUES (?,?,?)"; - - public DeedEntity insertDeed(OwnerEntity owner, HostInterface host) { - DeedEntity deed = new DeedEntity(); - deed.id = SqlUtil.genKeyRandom(); - deed.host = host.getName(); - deed.owner = owner.name; - - getJdbcTemplate().update(INSERT_DEED, deed.getId(), owner.getId(), host.getId()); - - return deed; - } - - private static final String QUERY_FOR_DEED = - "SELECT " + "deed.pk_deed, " + "host.str_name as str_hostname, " + "owner.str_username " - + "FROM " + "deed," + "host," + "owner " + "WHERE " + "deed.pk_owner = owner.pk_owner " - + "AND " + "deed.pk_host = host.pk_host "; - - @Override - public DeedEntity getDeed(String id) { - return getJdbcTemplate().queryForObject(QUERY_FOR_DEED + " AND pk_deed = ?", DEED_MAPPER, id); - } - - @Override - public List getDeeds(OwnerEntity owner) { - return getJdbcTemplate().query(QUERY_FOR_DEED + " AND owner.pk_owner = ?", DEED_MAPPER, - owner.getId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java index 24a1c3ba0..d3e950da4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DepartmentDaoJdbc.java @@ -28,48 +28,48 @@ public class DepartmentDaoJdbc extends JdbcDaoSupport implements DepartmentDao { - public static final RowMapper DEPARTMENT_MAPPER = - new RowMapper() { - public DepartmentInterface mapRow(ResultSet rs, int rowNum) throws SQLException { - DepartmentEntity d = new DepartmentEntity(); - d.id = rs.getString("pk_dept"); - d.name = rs.getString("str_name"); - return d; - } - }; + public static final RowMapper DEPARTMENT_MAPPER = + new RowMapper() { + public DepartmentInterface mapRow(ResultSet rs, int rowNum) throws SQLException { + DepartmentEntity d = new DepartmentEntity(); + d.id = rs.getString("pk_dept"); + d.name = rs.getString("str_name"); + return d; + } + }; - @Override - public boolean departmentExists(String name) { - return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM dept WHERE str_name=?", - Integer.class, name) > 0; - } + @Override + public boolean departmentExists(String name) { + return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM dept WHERE str_name=?", + Integer.class, name) > 0; + } - @Override - public DepartmentInterface findDepartment(String name) { - return getJdbcTemplate().queryForObject("SELECT pk_dept, str_name FROM dept WHERE str_name=?", - DEPARTMENT_MAPPER, name); - } + @Override + public DepartmentInterface findDepartment(String name) { + return getJdbcTemplate().queryForObject( + "SELECT pk_dept, str_name FROM dept WHERE str_name=?", DEPARTMENT_MAPPER, name); + } - @Override - public DepartmentInterface getDefaultDepartment() { - return 
getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE b_default=true", DEPARTMENT_MAPPER); - } + @Override + public DepartmentInterface getDefaultDepartment() { + return getJdbcTemplate().queryForObject( + "SELECT pk_dept, str_name FROM dept WHERE b_default=true", DEPARTMENT_MAPPER); + } - @Override - public DepartmentInterface getDepartment(String id) { - return getJdbcTemplate().queryForObject("SELECT pk_dept, str_name FROM dept WHERE pk_dept=?", - DEPARTMENT_MAPPER, id); - } + @Override + public DepartmentInterface getDepartment(String id) { + return getJdbcTemplate().queryForObject( + "SELECT pk_dept, str_name FROM dept WHERE pk_dept=?", DEPARTMENT_MAPPER, id); + } - @Override - public void deleteDepartment(DepartmentInterface d) { - getJdbcTemplate().update("DELETE FROM dept WHERE pk_dept=?", d.getDepartmentId()); - } + @Override + public void deleteDepartment(DepartmentInterface d) { + getJdbcTemplate().update("DELETE FROM dept WHERE pk_dept=?", d.getDepartmentId()); + } - @Override - public void insertDepartment(String name) { - getJdbcTemplate().update("INSERT INTO dept (pk_dept,str_name) VALUES (?,?)", - SqlUtil.genKeyRandom(), name); - } + @Override + public void insertDepartment(String name) { + getJdbcTemplate().update("INSERT INTO dept (pk_dept,str_name) VALUES (?,?)", + SqlUtil.genKeyRandom(), name); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java index 30038ccd8..b93d8962a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DependDaoJdbc.java @@ -45,371 +45,383 @@ public class DependDaoJdbc extends JdbcDaoSupport implements DependDao { - public static final RowMapper DEPEND_MAPPER = - new RowMapper() { - public LightweightDependency mapRow(ResultSet rs, int row) throws SQLException { - LightweightDependency d = new LightweightDependency(); - d.id = rs.getString("pk_depend"); - d.type = DependType.valueOf(rs.getString("str_type")); - d.target = DependTarget.valueOf(rs.getString("str_target")); - d.anyFrame = rs.getBoolean("b_any"); - d.parent = rs.getString("pk_parent"); - d.active = rs.getBoolean("b_active"); - d.dependErFrameId = rs.getString("pk_frame_depend_er"); - d.dependOnFrameId = rs.getString("pk_frame_depend_on"); - d.dependErLayerId = rs.getString("pk_layer_depend_er"); - d.dependOnLayerId = rs.getString("pk_layer_depend_on"); - d.dependOnJobId = rs.getString("pk_job_depend_on"); - d.dependErJobId = rs.getString("pk_job_depend_er"); - return d; + public static final RowMapper DEPEND_MAPPER = + new RowMapper() { + public LightweightDependency mapRow(ResultSet rs, int row) throws SQLException { + LightweightDependency d = new LightweightDependency(); + d.id = rs.getString("pk_depend"); + d.type = DependType.valueOf(rs.getString("str_type")); + d.target = DependTarget.valueOf(rs.getString("str_target")); + d.anyFrame = rs.getBoolean("b_any"); + d.parent = rs.getString("pk_parent"); + d.active = rs.getBoolean("b_active"); + d.dependErFrameId = rs.getString("pk_frame_depend_er"); + d.dependOnFrameId = rs.getString("pk_frame_depend_on"); + d.dependErLayerId = rs.getString("pk_layer_depend_er"); + d.dependOnLayerId = rs.getString("pk_layer_depend_on"); + d.dependOnJobId = rs.getString("pk_job_depend_on"); + d.dependErJobId = rs.getString("pk_job_depend_er"); + return d; + } + }; + + private static final String INSERT_DEPEND = "INSERT 
INTO " + "depend " + "(" + "pk_depend," + + "pk_parent," + "pk_job_depend_er," + "pk_layer_depend_er," + "pk_frame_depend_er," + + "pk_job_depend_on," + "pk_layer_depend_on," + "pk_frame_depend_on," + "str_type," + + "b_any, " + "str_target, " + "b_active, " + "str_signature, " + "b_composite " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertDepend(JobOnJob d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), + null, null, d.getDependOnJob().getJobId(), null, null, + DependType.JOB_ON_JOB.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(JobOnLayer d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), + null, null, d.getDependOnLayer().getJobId(), d.getDependOnLayer().getLayerId(), + null, DependType.JOB_ON_LAYER.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(JobOnFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), + null, null, d.getDependOnFrame().getJobId(), d.getDependOnFrame().getLayerId(), + d.getDependOnFrame().getFrameId(), DependType.JOB_ON_FRAME.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), + d.isComposite()); + } + + @Override + public void insertDepend(LayerOnJob d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnJob().getJobId(), null, null, + DependType.LAYER_ON_JOB.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(LayerOnLayer d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), + d.getDependOnLayer().getLayerId(), null, DependType.LAYER_ON_LAYER.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), + d.isComposite()); + } + + @Override + public void insertDepend(LayerOnFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnFrame().getJobId(), + d.getDependOnFrame().getLayerId(), d.getDependOnFrame().getFrameId(), + DependType.LAYER_ON_FRAME.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(FrameOnJob d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErFrame().getJobId(), + d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), + d.getDependOnJob().getJobId(), null, null, DependType.FRAME_ON_JOB.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), + d.isComposite()); + } + + @Override + public void insertDepend(FrameOnLayer d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErFrame().getJobId(), + d.getDependErFrame().getLayerId(), 
d.getDependErFrame().getFrameId(), + d.getDependOnLayer().getJobId(), d.getDependOnLayer().getLayerId(), null, + DependType.FRAME_ON_LAYER.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); + } + + @Override + public void insertDepend(PreviousFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), + d.getDependOnLayer().getLayerId(), null, DependType.PREVIOUS_FRAME.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), + d.isComposite()); + } + + @Override + public void insertDepend(FrameOnFrame d) { + d.setId(SqlUtil.genKeyRandom()); + String parentId = null; + if (d.getParent() != null) { + parentId = d.getParent().getId(); } - }; - - private static final String INSERT_DEPEND = "INSERT INTO " + "depend " + "(" + "pk_depend," - + "pk_parent," + "pk_job_depend_er," + "pk_layer_depend_er," + "pk_frame_depend_er," - + "pk_job_depend_on," + "pk_layer_depend_on," + "pk_frame_depend_on," + "str_type," - + "b_any, " + "str_target, " + "b_active, " + "str_signature, " + "b_composite " + ") " - + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertDepend(JobOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), null, - null, d.getDependOnJob().getJobId(), null, null, DependType.JOB_ON_JOB.toString(), - d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(JobOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), null, - null, d.getDependOnLayer().getJobId(), d.getDependOnLayer().getLayerId(), null, - DependType.JOB_ON_LAYER.toString(), d.isAnyFrame(), d.getTarget().toString(), d.isActive(), - d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(JobOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErJob().getJobId(), null, - null, d.getDependOnFrame().getJobId(), d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), DependType.JOB_ON_FRAME.toString(), d.isAnyFrame(), - d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(LayerOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), null, d.getDependOnJob().getJobId(), null, null, - DependType.LAYER_ON_JOB.toString(), d.isAnyFrame(), d.getTarget().toString(), d.isActive(), - d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(LayerOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), null, DependType.LAYER_ON_LAYER.toString(), - d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(LayerOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), - 
d.getDependErLayer().getLayerId(), null, d.getDependOnFrame().getJobId(), - d.getDependOnFrame().getLayerId(), d.getDependOnFrame().getFrameId(), - DependType.LAYER_ON_FRAME.toString(), d.isAnyFrame(), d.getTarget().toString(), - d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(FrameOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), - d.getDependOnJob().getJobId(), null, null, DependType.FRAME_ON_JOB.toString(), - d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(FrameOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), - d.getDependOnLayer().getJobId(), d.getDependOnLayer().getLayerId(), null, - DependType.FRAME_ON_LAYER.toString(), d.isAnyFrame(), d.getTarget().toString(), - d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(PreviousFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), null, DependType.PREVIOUS_FRAME.toString(), - d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(FrameOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - String parentId = null; - if (d.getParent() != null) { - parentId = d.getParent().getId(); + + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), parentId, + d.getDependErFrame().getJobId(), d.getDependErFrame().getLayerId(), + d.getDependErFrame().getFrameId(), d.getDependOnFrame().getJobId(), + d.getDependOnFrame().getLayerId(), d.getDependOnFrame().getFrameId(), + DependType.FRAME_ON_FRAME.toString(), d.isAnyFrame(), d.getTarget().toString(), + d.isActive(), d.getSignature(), d.isComposite()); } - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), parentId, d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), d.getDependErFrame().getFrameId(), - d.getDependOnFrame().getJobId(), d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), DependType.FRAME_ON_FRAME.toString(), d.isAnyFrame(), - d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - @Override - public void insertDepend(FrameByFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), null, DependType.FRAME_BY_FRAME.toString(), - d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), d.isComposite()); - } - - private static final String UPDATE_FRAME_STATE = - "UPDATE " + "frame " + "SET " + "str_state='DEPEND' " + "WHERE " + "int_depend_count != 0 " - + "AND " + "frame.str_state NOT IN ('SUCCEEDED','EATEN','RUNNING','DEPEND') " + "AND " - + "frame.pk_frame = ?"; - - @Override - public void updateFrameState(FrameInterface f) { - getJdbcTemplate().update(UPDATE_FRAME_STATE, f.getFrameId()); - } - - private static final String UPDATE_DEPEND_COUNT = "UPDATE " + "frame " 
+ "SET " - + "int_depend_count = int_depend_count + 1 " + "WHERE " + "pk_frame = ?"; - - @Override - public void incrementDependCount(FrameInterface f) { - int result = getJdbcTemplate().update(UPDATE_DEPEND_COUNT, f.getFrameId()); - if (result == 0) { - throw new DependException("updating the depend count for " + " the frame " + f.getName() - + " in job " + f.getJobId() + "failed."); + @Override + public void insertDepend(FrameByFrame d) { + d.setId(SqlUtil.genKeyRandom()); + getJdbcTemplate().update(INSERT_DEPEND, d.getId(), null, d.getDependErLayer().getJobId(), + d.getDependErLayer().getLayerId(), null, d.getDependOnLayer().getJobId(), + d.getDependOnLayer().getLayerId(), null, DependType.FRAME_BY_FRAME.toString(), + d.isAnyFrame(), d.getTarget().toString(), d.isActive(), d.getSignature(), + d.isComposite()); } - } - private static final String DECREMENT_DEPEND_COUNT = - "UPDATE " + "frame " + "SET " + "int_depend_count = int_depend_count -1 " + "WHERE " - + "pk_frame = ? " + "AND " + "int_depend_count > 0"; + private static final String UPDATE_FRAME_STATE = "UPDATE " + "frame " + "SET " + + "str_state='DEPEND' " + "WHERE " + "int_depend_count != 0 " + "AND " + + "frame.str_state NOT IN ('SUCCEEDED','EATEN','RUNNING','DEPEND') " + "AND " + + "frame.pk_frame = ?"; - @Override - public boolean decrementDependCount(FrameInterface f) { - return getJdbcTemplate().update(DECREMENT_DEPEND_COUNT, f.getFrameId()) == 1; - } + @Override + public void updateFrameState(FrameInterface f) { + getJdbcTemplate().update(UPDATE_FRAME_STATE, f.getFrameId()); + } - private static final String[] DELETE_DEPEND = - {"DELETE FROM depend WHERE pk_parent=?", "DELETE FROM depend WHERE pk_depend=?"}; + private static final String UPDATE_DEPEND_COUNT = "UPDATE " + "frame " + "SET " + + "int_depend_count = int_depend_count + 1 " + "WHERE " + "pk_frame = ?"; - @Override - public void deleteDepend(LightweightDependency depend) { - if (depend.type.equals(DependType.FRAME_BY_FRAME)) { - getJdbcTemplate().update(DELETE_DEPEND[0], depend.getId()); + @Override + public void incrementDependCount(FrameInterface f) { + int result = getJdbcTemplate().update(UPDATE_DEPEND_COUNT, f.getFrameId()); + if (result == 0) { + throw new DependException("updating the depend count for " + " the frame " + f.getName() + + " in job " + f.getJobId() + "failed."); + } } - getJdbcTemplate().update(DELETE_DEPEND[1], depend.getId()); - } - - private static final String GET_LIGHTWEIGHT_DEPEND = "SELECT * FROM depend WHERE pk_depend=?"; - - @Override - public LightweightDependency getDepend(String id) { - return getJdbcTemplate().queryForObject(GET_LIGHTWEIGHT_DEPEND, DEPEND_MAPPER, id); - } - - private static final String GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE = - "SELECT * FROM depend WHERE str_signature=?"; - - @Override - public LightweightDependency getDependBySignature(String s) { - return getJdbcTemplate().queryForObject(GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE, DEPEND_MAPPER, s); - } - - private static final String GET_WHAT_DEPENDS_ON_JOB = "SELECT " + "depend.pk_depend," - + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," - + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," - + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," - + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? 
" + "AND " - + "b_active = true " + "AND " + "str_type IN (?,?,?)"; - - @Override - public List getWhatDependsOn(JobInterface job) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB, DEPEND_MAPPER, job.getJobId(), - DependType.JOB_ON_JOB.toString(), DependType.LAYER_ON_JOB.toString(), - DependType.FRAME_ON_JOB.toString()); - } - - private static final String GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET = "SELECT " + "depend.pk_depend," - + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," - + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," - + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," - + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? " + "AND " - + "b_active = true " + "AND " + "str_target = ? " + "AND " + "str_type IN (?,?,?)"; - - @Override - public List getWhatDependsOn(JobInterface job, DependTarget target) { - if (target.equals(DependTarget.ANY_TARGET)) { - return getWhatDependsOn(job); - } else { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET, DEPEND_MAPPER, - job.getJobId(), target.toString(), DependType.JOB_ON_JOB.toString(), - DependType.LAYER_ON_JOB.toString(), DependType.FRAME_ON_JOB.toString()); + + private static final String DECREMENT_DEPEND_COUNT = + "UPDATE " + "frame " + "SET " + "int_depend_count = int_depend_count -1 " + "WHERE " + + "pk_frame = ? " + "AND " + "int_depend_count > 0"; + + @Override + public boolean decrementDependCount(FrameInterface f) { + return getJdbcTemplate().update(DECREMENT_DEPEND_COUNT, f.getFrameId()) == 1; } - } - - private static final String GET_WHAT_DEPENDS_ON_LAYER = "SELECT " + "depend.pk_depend," - + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," - + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," - + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," - + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? " + "AND " - + "pk_layer_depend_on=? " + "AND " + "str_type IN (?,?,?) " + "AND " + "b_active = ?"; - - @Override - public List getWhatDependsOn(LayerInterface layer) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, DEPEND_MAPPER, layer.getJobId(), - layer.getLayerId(), DependType.JOB_ON_LAYER.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.FRAME_ON_LAYER.toString(), true); - } - - @Override - public List getWhatDependsOn(LayerInterface layer, boolean active) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, DEPEND_MAPPER, layer.getJobId(), - layer.getLayerId(), DependType.JOB_ON_LAYER.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.FRAME_ON_LAYER.toString(), active); - } - - private static final String GET_WHAT_DEPENDS_ON_FRAME = - "SELECT " + "depend.pk_depend," + "depend.str_type," + "depend.str_target," + "depend.b_any," - + "depend.pk_parent," + "depend.b_active," + "depend.pk_frame_depend_er," - + "depend.pk_frame_depend_on," + "depend.pk_layer_depend_er," - + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " - + "FROM " + "depend " + "WHERE " + "b_active = ? " + "AND " + "pk_job_depend_on = ? " - + "AND " + "(pk_frame_depend_on = ? " + "AND " + "str_type IN (?,?,?)) " + "OR " - + "(pk_layer_depend_on = ? AND str_type = ? 
AND b_any = true)"; - - @Override - public List getWhatDependsOn(FrameInterface frame) { - return getWhatDependsOn(frame, true); - } - - @Override - public List getWhatDependsOn(FrameInterface frame, boolean active) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_FRAME, DEPEND_MAPPER, active, - frame.getJobId(), frame.getFrameId(), DependType.FRAME_ON_FRAME.toString(), - DependType.LAYER_ON_FRAME.toString(), DependType.JOB_ON_FRAME.toString(), - frame.getLayerId(), DependType.LAYER_ON_LAYER.toString()); - } - - private static final String SET_INACTIVE = "UPDATE " + "depend " + "SET " + "b_active=false," - + "ts_satisfied=current_timestamp," + "str_signature=pk_depend " + "WHERE " + "pk_depend = ? " - + "AND " + "b_active = true " + "AND " + "b_composite = false"; - - @Override - public boolean setInactive(LightweightDependency depend) { - depend.active = getJdbcTemplate().update(SET_INACTIVE, depend.getId()) == 1; - return depend.active; - } - - private static final String SET_ACTIVE = "UPDATE " + "depend " + "SET " + "b_active=true " - + "WHERE " + "pk_depend=? " + "AND " + "b_active=false"; - - @Override - public boolean setActive(LightweightDependency depend) { - if (!depend.type.equals(DependType.FRAME_ON_FRAME) - && !depend.type.equals(DependType.LAYER_ON_LAYER)) { - return false; + + private static final String[] DELETE_DEPEND = + {"DELETE FROM depend WHERE pk_parent=?", "DELETE FROM depend WHERE pk_depend=?"}; + + @Override + public void deleteDepend(LightweightDependency depend) { + if (depend.type.equals(DependType.FRAME_BY_FRAME)) { + getJdbcTemplate().update(DELETE_DEPEND[0], depend.getId()); + } + getJdbcTemplate().update(DELETE_DEPEND[1], depend.getId()); } - depend.active = getJdbcTemplate().update(SET_ACTIVE, depend.getId()) == 1; - return depend.active; - } - - private static final String GET_CHILD_DEPENDS = "SELECT " + "depend.pk_depend," - + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," - + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," - + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," - + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "depend.pk_job_depend_er = ? " - + "AND " + "depend.pk_job_depend_on = ? " + "AND " + "depend.pk_parent = ? " + "AND " - + "depend.b_active = true "; - - @Override - public List getChildDepends(LightweightDependency depend) { - return getJdbcTemplate().query(GET_CHILD_DEPENDS, DEPEND_MAPPER, depend.dependErJobId, - depend.dependOnJobId, depend.id); - } - - private static final String GET_WHAT_THIS_JOB_DEPENDS_ON = "SELECT " + "depend.pk_depend," - + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," - + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," - + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," - + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " + "depend.pk_job_depend_er = ? 
" - + "AND " + "depend.b_active = true " + "AND " + "depend.pk_parent IS NULL "; - - @Override - public List getWhatThisDependsOn(JobInterface job, DependTarget target) { - String query = GET_WHAT_THIS_JOB_DEPENDS_ON; - Object[] values = new Object[] {job.getJobId()}; - if (!target.equals(DependTarget.ANY_TARGET)) { - query = query + " AND depend.str_target = ?"; - values = new Object[] {job.getJobId(), target.toString()}; + + private static final String GET_LIGHTWEIGHT_DEPEND = "SELECT * FROM depend WHERE pk_depend=?"; + + @Override + public LightweightDependency getDepend(String id) { + return getJdbcTemplate().queryForObject(GET_LIGHTWEIGHT_DEPEND, DEPEND_MAPPER, id); } - return getJdbcTemplate().query(query, DEPEND_MAPPER, values); - - } - - private static final String GET_WHAT_THIS_LAYER_DEPENDS_ON = "SELECT " + "depend.pk_depend," - + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," - + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," - + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," - + "depend.pk_job_depend_on " + "FROM " + "depend " + "WHERE " - + "depend.pk_layer_depend_er = ? " + "AND " + "depend.b_active = true " + "AND " - + "depend.pk_parent IS NULL " + "AND " + "depend.str_type IN (?,?,?,?) "; - - @Override - public List getWhatThisDependsOn(LayerInterface layer, - DependTarget target) { - if (!target.equals(DependTarget.ANY_TARGET)) { - String query = GET_WHAT_THIS_LAYER_DEPENDS_ON + " AND str_target = ?"; - return getJdbcTemplate().query(query, DEPEND_MAPPER, layer.getLayerId(), - DependType.LAYER_ON_JOB.toString(), DependType.LAYER_ON_LAYER.toString(), - DependType.LAYER_ON_FRAME.toString(), DependType.FRAME_BY_FRAME.toString(), - target.toString()); - } else { - return getJdbcTemplate().query(GET_WHAT_THIS_LAYER_DEPENDS_ON, DEPEND_MAPPER, - layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), - DependType.FRAME_BY_FRAME.toString()); + + private static final String GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE = + "SELECT * FROM depend WHERE str_signature=?"; + + @Override + public LightweightDependency getDependBySignature(String s) { + return getJdbcTemplate().queryForObject(GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE, DEPEND_MAPPER, + s); } - } - - private static final String GET_WHAT_THIS_FRAME_DEPENDS_ON = - "SELECT " + "depend.pk_depend," + "depend.str_type," + "depend.str_target," + "depend.b_any," - + "depend.pk_parent," + "depend.b_active," + "depend.pk_frame_depend_er," - + "depend.pk_frame_depend_on," + "depend.pk_layer_depend_er," - + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " - + "FROM " + "depend " + "WHERE " + "depend.pk_frame_depend_er = ? " + "AND " - + "depend.b_active = true " + "AND " + "depend.str_type IN (?,?,?) 
"; - - @Override - public List getWhatThisDependsOn(FrameInterface frame, - DependTarget target) { - if (!target.equals(DependTarget.ANY_TARGET)) { - String query = GET_WHAT_THIS_FRAME_DEPENDS_ON + " AND depend.str_target = ?"; - return getJdbcTemplate().query(query, DEPEND_MAPPER, frame.getFrameId(), - DependType.FRAME_ON_JOB.toString(), DependType.FRAME_ON_LAYER.toString(), - DependType.FRAME_ON_FRAME.toString(), target.toString()); - } else { - return getJdbcTemplate().query(GET_WHAT_THIS_FRAME_DEPENDS_ON, DEPEND_MAPPER, - frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), - DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString()); + + private static final String GET_WHAT_DEPENDS_ON_JOB = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "pk_job_depend_on=? " + "AND " + "b_active = true " + "AND " + + "str_type IN (?,?,?)"; + + @Override + public List getWhatDependsOn(JobInterface job) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB, DEPEND_MAPPER, job.getJobId(), + DependType.JOB_ON_JOB.toString(), DependType.LAYER_ON_JOB.toString(), + DependType.FRAME_ON_JOB.toString()); + } + + private static final String GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET = "SELECT " + + "depend.pk_depend," + "depend.str_type," + "depend.str_target," + "depend.b_any," + + "depend.pk_parent," + "depend.b_active," + "depend.pk_frame_depend_er," + + "depend.pk_frame_depend_on," + "depend.pk_layer_depend_er," + + "depend.pk_layer_depend_on," + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + + "FROM " + "depend " + "WHERE " + "pk_job_depend_on=? " + "AND " + "b_active = true " + + "AND " + "str_target = ? " + "AND " + "str_type IN (?,?,?)"; + + @Override + public List getWhatDependsOn(JobInterface job, DependTarget target) { + if (target.equals(DependTarget.ANY_TARGET)) { + return getWhatDependsOn(job); + } else { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET, DEPEND_MAPPER, + job.getJobId(), target.toString(), DependType.JOB_ON_JOB.toString(), + DependType.LAYER_ON_JOB.toString(), DependType.FRAME_ON_JOB.toString()); + } + } + + private static final String GET_WHAT_DEPENDS_ON_LAYER = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "pk_job_depend_on=? " + "AND " + "pk_layer_depend_on=? " + "AND " + + "str_type IN (?,?,?) 
" + "AND " + "b_active = ?"; + + @Override + public List getWhatDependsOn(LayerInterface layer) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, DEPEND_MAPPER, layer.getJobId(), + layer.getLayerId(), DependType.JOB_ON_LAYER.toString(), + DependType.LAYER_ON_LAYER.toString(), DependType.FRAME_ON_LAYER.toString(), true); + } + + @Override + public List getWhatDependsOn(LayerInterface layer, boolean active) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, DEPEND_MAPPER, layer.getJobId(), + layer.getLayerId(), DependType.JOB_ON_LAYER.toString(), + DependType.LAYER_ON_LAYER.toString(), DependType.FRAME_ON_LAYER.toString(), active); + } + + private static final String GET_WHAT_DEPENDS_ON_FRAME = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "b_active = ? " + "AND " + "pk_job_depend_on = ? " + "AND " + + "(pk_frame_depend_on = ? " + "AND " + "str_type IN (?,?,?)) " + "OR " + + "(pk_layer_depend_on = ? AND str_type = ? AND b_any = true)"; + + @Override + public List getWhatDependsOn(FrameInterface frame) { + return getWhatDependsOn(frame, true); + } + + @Override + public List getWhatDependsOn(FrameInterface frame, boolean active) { + return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_FRAME, DEPEND_MAPPER, active, + frame.getJobId(), frame.getFrameId(), DependType.FRAME_ON_FRAME.toString(), + DependType.LAYER_ON_FRAME.toString(), DependType.JOB_ON_FRAME.toString(), + frame.getLayerId(), DependType.LAYER_ON_LAYER.toString()); + } + + private static final String SET_INACTIVE = "UPDATE " + "depend " + "SET " + "b_active=false," + + "ts_satisfied=current_timestamp," + "str_signature=pk_depend " + "WHERE " + + "pk_depend = ? " + "AND " + "b_active = true " + "AND " + "b_composite = false"; + + @Override + public boolean setInactive(LightweightDependency depend) { + depend.active = getJdbcTemplate().update(SET_INACTIVE, depend.getId()) == 1; + return depend.active; + } + + private static final String SET_ACTIVE = "UPDATE " + "depend " + "SET " + "b_active=true " + + "WHERE " + "pk_depend=? " + "AND " + "b_active=false"; + + @Override + public boolean setActive(LightweightDependency depend) { + if (!depend.type.equals(DependType.FRAME_ON_FRAME) + && !depend.type.equals(DependType.LAYER_ON_LAYER)) { + return false; + } + depend.active = getJdbcTemplate().update(SET_ACTIVE, depend.getId()) == 1; + return depend.active; + } + + private static final String GET_CHILD_DEPENDS = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "depend.pk_job_depend_er = ? " + "AND " + "depend.pk_job_depend_on = ? " + + "AND " + "depend.pk_parent = ? 
" + "AND " + "depend.b_active = true "; + + @Override + public List getChildDepends(LightweightDependency depend) { + return getJdbcTemplate().query(GET_CHILD_DEPENDS, DEPEND_MAPPER, depend.dependErJobId, + depend.dependOnJobId, depend.id); + } + + private static final String GET_WHAT_THIS_JOB_DEPENDS_ON = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "depend.pk_job_depend_er = ? " + "AND " + "depend.b_active = true " + + "AND " + "depend.pk_parent IS NULL "; + + @Override + public List getWhatThisDependsOn(JobInterface job, DependTarget target) { + String query = GET_WHAT_THIS_JOB_DEPENDS_ON; + Object[] values = new Object[] {job.getJobId()}; + if (!target.equals(DependTarget.ANY_TARGET)) { + query = query + " AND depend.str_target = ?"; + values = new Object[] {job.getJobId(), target.toString()}; + } + return getJdbcTemplate().query(query, DEPEND_MAPPER, values); + + } + + private static final String GET_WHAT_THIS_LAYER_DEPENDS_ON = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "depend.pk_layer_depend_er = ? " + "AND " + "depend.b_active = true " + + "AND " + "depend.pk_parent IS NULL " + "AND " + "depend.str_type IN (?,?,?,?) "; + + @Override + public List getWhatThisDependsOn(LayerInterface layer, + DependTarget target) { + if (!target.equals(DependTarget.ANY_TARGET)) { + String query = GET_WHAT_THIS_LAYER_DEPENDS_ON + " AND str_target = ?"; + return getJdbcTemplate().query(query, DEPEND_MAPPER, layer.getLayerId(), + DependType.LAYER_ON_JOB.toString(), DependType.LAYER_ON_LAYER.toString(), + DependType.LAYER_ON_FRAME.toString(), DependType.FRAME_BY_FRAME.toString(), + target.toString()); + } else { + return getJdbcTemplate().query(GET_WHAT_THIS_LAYER_DEPENDS_ON, DEPEND_MAPPER, + layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), + DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), + DependType.FRAME_BY_FRAME.toString()); + } + } + + private static final String GET_WHAT_THIS_FRAME_DEPENDS_ON = "SELECT " + "depend.pk_depend," + + "depend.str_type," + "depend.str_target," + "depend.b_any," + "depend.pk_parent," + + "depend.b_active," + "depend.pk_frame_depend_er," + "depend.pk_frame_depend_on," + + "depend.pk_layer_depend_er," + "depend.pk_layer_depend_on," + + "depend.pk_job_depend_er," + "depend.pk_job_depend_on " + "FROM " + "depend " + + "WHERE " + "depend.pk_frame_depend_er = ? " + "AND " + "depend.b_active = true " + + "AND " + "depend.str_type IN (?,?,?) 
"; + + @Override + public List getWhatThisDependsOn(FrameInterface frame, + DependTarget target) { + if (!target.equals(DependTarget.ANY_TARGET)) { + String query = GET_WHAT_THIS_FRAME_DEPENDS_ON + " AND depend.str_target = ?"; + return getJdbcTemplate().query(query, DEPEND_MAPPER, frame.getFrameId(), + DependType.FRAME_ON_JOB.toString(), DependType.FRAME_ON_LAYER.toString(), + DependType.FRAME_ON_FRAME.toString(), target.toString()); + } else { + return getJdbcTemplate().query(GET_WHAT_THIS_FRAME_DEPENDS_ON, DEPEND_MAPPER, + frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), + DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString()); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java index 6962fb3e8..876880d82 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java @@ -57,512 +57,518 @@ * @category DAO */ public class DispatcherDaoJdbc extends JdbcDaoSupport implements DispatcherDao { - private static final Logger logger = LogManager.getLogger(DispatcherDaoJdbc.class); - private PrometheusMetricsCollector prometheusMetrics; + private static final Logger logger = LogManager.getLogger(DispatcherDaoJdbc.class); + private PrometheusMetricsCollector prometheusMetrics; - public void setPrometheusMetrics(PrometheusMetricsCollector prometheusMetrics) { - this.prometheusMetrics = prometheusMetrics; - } + public void setPrometheusMetrics(PrometheusMetricsCollector prometheusMetrics) { + this.prometheusMetrics = prometheusMetrics; + } + + public static final RowMapper PKJOB_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("pk_job"); + } + }; + + private static final RowMapper SHOW_MAPPER = new RowMapper() { + public SortableShow mapRow(ResultSet rs, int rowNum) throws SQLException { + return new SortableShow(rs.getString("pk_show"), rs.getFloat("float_tier")); + } + }; - public static final RowMapper PKJOB_MAPPER = new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("pk_job"); + private int threadMode(int mode) { + if (mode == ThreadMode.ALL_VALUE) + return mode; + return ThreadMode.AUTO_VALUE; } - }; - private static final RowMapper SHOW_MAPPER = new RowMapper() { - public SortableShow mapRow(ResultSet rs, int rowNum) throws SQLException { - return new SortableShow(rs.getString("pk_show"), rs.getFloat("float_tier")); + /** + * Number of milliseconds before the show cache expires and a new show cache is created. + */ + private static final long SHOW_CACHE_EXPIRE_TIME_SEC = 8000; + + /** + * Wraps a list of SortableShows along with an expiration time. + */ + private class ShowCache { + final private long expireTime = System.currentTimeMillis() + SHOW_CACHE_EXPIRE_TIME_SEC; + final private List shows; + + public ShowCache(List shows) { + this.shows = shows; + Collections.sort(this.shows); + } + + public boolean isExpired() { + return System.currentTimeMillis() > expireTime; + } + + public List getShows() { + return shows; + } } - }; - - private int threadMode(int mode) { - if (mode == ThreadMode.ALL_VALUE) - return mode; - return ThreadMode.AUTO_VALUE; - } - - /** - * Number of milliseconds before the show cache expires and a new show cache is created. 
- */ - private static final long SHOW_CACHE_EXPIRE_TIME_SEC = 8000; - - /** - * Wraps a list of SortableShows along with an expiration time. - */ - private class ShowCache { - final private long expireTime = System.currentTimeMillis() + SHOW_CACHE_EXPIRE_TIME_SEC; - final private List shows; - - public ShowCache(List shows) { - this.shows = shows; - Collections.sort(this.shows); + + /** + * A cache of SortableShows keyed on host tags. + */ + private final ConcurrentHashMap bookableShows = + new ConcurrentHashMap(); + + public boolean testMode = false; + + /** + * Choose between different scheduling strategies + */ + private SchedulingMode schedulingMode; + + @Autowired + public DispatcherDaoJdbc(Environment env) { + this.schedulingMode = SchedulingMode.valueOf( + env.getProperty("dispatcher.scheduling_mode", String.class, "PRIORITY_ONLY")); } - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; + @Override + public SchedulingMode getSchedulingMode() { + return schedulingMode; } - public List getShows() { - return shows; + @Override + public void setSchedulingMode(SchedulingMode schedulingMode) { + this.schedulingMode = schedulingMode; } - } - - /** - * A cache of SortableShows keyed on host tags. - */ - private final ConcurrentHashMap bookableShows = - new ConcurrentHashMap(); - - public boolean testMode = false; - - /** - * Choose between different scheduling strategies - */ - private SchedulingMode schedulingMode; - - @Autowired - public DispatcherDaoJdbc(Environment env) { - this.schedulingMode = SchedulingMode - .valueOf(env.getProperty("dispatcher.scheduling_mode", String.class, "PRIORITY_ONLY")); - } - - @Override - public SchedulingMode getSchedulingMode() { - return schedulingMode; - } - - @Override - public void setSchedulingMode(SchedulingMode schedulingMode) { - this.schedulingMode = schedulingMode; - } - - /** - * Returns a sorted list of shows that have pending jobs which could benefit from the specified - * allocation. - * - * @param alloc - * @return a sorted list of shows. - */ - private List getBookableShows(AllocationInterface alloc) { - long startTime = System.currentTimeMillis(); - String key = alloc.getAllocationId(); - - ShowCache cached = bookableShows.get(key); - if (cached == null) { - bookableShows.put(key, - new ShowCache(getJdbcTemplate().query(FIND_SHOWS, SHOW_MAPPER, alloc.getAllocationId()))); - } else if (cached.isExpired()) { - bookableShows.put(key, - new ShowCache(getJdbcTemplate().query(FIND_SHOWS, SHOW_MAPPER, alloc.getAllocationId()))); + + /** + * Returns a sorted list of shows that have pending jobs which could benefit from the specified + * allocation. + * + * @param alloc + * @return a sorted list of shows. 
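
getBookableShows, defined next, rebuilds the per-allocation ShowCache whenever the 8000 ms window set by SHOW_CACHE_EXPIRE_TIME_SEC has elapsed. A stripped-down sketch of that expiring-cache pattern, with illustrative names and no OpenCue types; like the original, it tolerates an occasional duplicate rebuild under concurrent access:

    import java.util.concurrent.ConcurrentHashMap;
    import java.util.function.Supplier;

    final class ExpiringCache<V> {
        // 8000 ms, the same window as SHOW_CACHE_EXPIRE_TIME_SEC above.
        private static final long TTL_MS = 8000;

        private static final class Entry<T> {
            final T value;
            final long expiresAt;
            Entry(T value, long expiresAt) {
                this.value = value;
                this.expiresAt = expiresAt;
            }
        }

        private final ConcurrentHashMap<String, Entry<V>> cache = new ConcurrentHashMap<>();

        V get(String key, Supplier<V> loader) {
            Entry<V> entry = cache.get(key);
            if (entry == null || System.currentTimeMillis() > entry.expiresAt) {
                // Rebuild on a miss or after expiry, mirroring
                // bookableShows.put(key, new ShowCache(...)) in the DAO.
                // As in the original, two threads may rebuild at once; the last write wins.
                entry = new Entry<>(loader.get(), System.currentTimeMillis() + TTL_MS);
                cache.put(key, entry);
            }
            return entry.value;
        }
    }
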
+ */ + private List getBookableShows(AllocationInterface alloc) { + long startTime = System.currentTimeMillis(); + String key = alloc.getAllocationId(); + + ShowCache cached = bookableShows.get(key); + if (cached == null) { + bookableShows.put(key, new ShowCache( + getJdbcTemplate().query(FIND_SHOWS, SHOW_MAPPER, alloc.getAllocationId()))); + } else if (cached.isExpired()) { + bookableShows.put(key, new ShowCache( + getJdbcTemplate().query(FIND_SHOWS, SHOW_MAPPER, alloc.getAllocationId()))); + } + prometheusMetrics.setBookingDurationMetric("getBookableShows", + System.currentTimeMillis() - startTime); + + return bookableShows.get(key).shows; } - prometheusMetrics.setBookingDurationMetric("getBookableShows", - System.currentTimeMillis() - startTime); - - return bookableShows.get(key).shows; - } - - private String handleInClause(String key, String query, int inValueLength) { - String placeholders = String.join(",", Collections.nCopies(inValueLength, "?")); - return query.replace(key + " IN ?", key + " IN (" + placeholders + ")"); - } - - private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { - LinkedHashSet result = new LinkedHashSet(); - List shows = new LinkedList(getBookableShows(host)); - // shows were sorted. If we want it in random sequence, we need to shuffle it. - if (shuffleShows) { - if (!shows.isEmpty()) - shows.remove(0); - Collections.shuffle(shows); + + private String handleInClause(String key, String query, int inValueLength) { + String placeholders = String.join(",", Collections.nCopies(inValueLength, "?")); + return query.replace(key + " IN ?", key + " IN (" + placeholders + ")"); } - long loopTime = System.currentTimeMillis(); - for (SortableShow s : shows) { - long lastTime = System.currentTimeMillis(); - if (s.isSkipped(host.tags, (long) host.cores, host.memory)) { - logger.info("skipping show " + s.getShowId()); - continue; - } - - if (s.isSkipped(host)) { - logger.info("skipping show " + s.getShowId() + ", over its subscription."); - continue; - } - - /** - * Check if the show is over its subscription because we're using cached SortableShows, we - * don't pull a fresh list of shows for a while. If the show is over its subscription the - * alloc gets add to the SortableShow skipped alloc set. - */ - if (getJdbcTemplate().queryForObject( - "SELECT int_burst - int_cores FROM subscription WHERE pk_show=? AND pk_alloc=?", - Integer.class, s.getShowId(), host.getAllocationId()) < 100) { - s.skip(host); - - prometheusMetrics.setBookingDurationMetric("findDispatchJobs check overburst", - System.currentTimeMillis() - lastTime); - continue; - } - - if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { - result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { - @Override - public PreparedStatement createPreparedStatement(Connection conn) throws SQLException { - String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); - PreparedStatement find_jobs_stmt = conn.prepareStatement(query); + private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { + LinkedHashSet result = new LinkedHashSet(); + List shows = new LinkedList(getBookableShows(host)); + // shows were sorted. If we want it in random sequence, we need to shuffle it. 
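
handleInClause, defined just above, turns the single placeholder in a query fragment such as str_os IN ? into one ? per bound value, so the host's OS list can be bound positionally. A small standalone illustration; the SELECT template is an assumed stand-in, since the real FIND_JOBS_BY_* constants are not shown in this hunk:

    import java.util.Collections;

    public class InClauseDemo {
        // Same logic as DispatcherDaoJdbc.handleInClause:
        // "str_os IN ?" becomes "str_os IN (?,?,?)" for three values.
        static String handleInClause(String key, String query, int inValueLength) {
            String placeholders = String.join(",", Collections.nCopies(inValueLength, "?"));
            return query.replace(key + " IN ?", key + " IN (" + placeholders + ")");
        }

        public static void main(String[] args) {
            // Assumed stand-in for one of the FIND_JOBS_BY_* query constants.
            String template = "SELECT pk_job FROM job WHERE str_os IN ?";
            // Prints: SELECT pk_job FROM job WHERE str_os IN (?,?,?)
            System.out.println(handleInClause("str_os", template, 3));
        }
    }
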
+ if (shuffleShows) { + if (!shows.isEmpty()) + shows.remove(0); + Collections.shuffle(shows); + } - int index = 1; - find_jobs_stmt.setString(index++, s.getShowId()); - find_jobs_stmt.setString(index++, host.getFacilityId()); - for (String item : host.getOs()) { - find_jobs_stmt.setString(index++, item); + long loopTime = System.currentTimeMillis(); + for (SortableShow s : shows) { + long lastTime = System.currentTimeMillis(); + if (s.isSkipped(host.tags, (long) host.cores, host.memory)) { + logger.info("skipping show " + s.getShowId()); + continue; } - find_jobs_stmt.setInt(index++, host.idleCores); - find_jobs_stmt.setLong(index++, host.idleMemory); - find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); - find_jobs_stmt.setString(index++, host.getName()); - find_jobs_stmt.setInt(index++, numJobs * 10); - return find_jobs_stmt; - } - }, PKJOB_MAPPER)); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs nogpu findByShowQuery", - System.currentTimeMillis() - lastTime); - } else { - result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { - @Override - public PreparedStatement createPreparedStatement(Connection conn) throws SQLException { - String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); - PreparedStatement find_jobs_stmt = conn.prepareStatement(query); - int index = 1; - find_jobs_stmt.setString(index++, s.getShowId()); - find_jobs_stmt.setString(index++, host.getFacilityId()); - for (String item : host.getOs()) { - find_jobs_stmt.setString(index++, item); + + if (s.isSkipped(host)) { + logger.info("skipping show " + s.getShowId() + ", over its subscription."); + continue; + } + + /** + * Check if the show is over its subscription because we're using cached SortableShows, + * we don't pull a fresh list of shows for a while. If the show is over its subscription + * the alloc gets add to the SortableShow skipped alloc set. + */ + if (getJdbcTemplate().queryForObject( + "SELECT int_burst - int_cores FROM subscription WHERE pk_show=? 
AND pk_alloc=?", + Integer.class, s.getShowId(), host.getAllocationId()) < 100) { + s.skip(host); + + prometheusMetrics.setBookingDurationMetric("findDispatchJobs check overburst", + System.currentTimeMillis() - lastTime); + continue; + } + + if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { + result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) + throws SQLException { + String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, + host.getOs().length); + PreparedStatement find_jobs_stmt = conn.prepareStatement(query); + + int index = 1; + find_jobs_stmt.setString(index++, s.getShowId()); + find_jobs_stmt.setString(index++, host.getFacilityId()); + for (String item : host.getOs()) { + find_jobs_stmt.setString(index++, item); + } + find_jobs_stmt.setInt(index++, host.idleCores); + find_jobs_stmt.setLong(index++, host.idleMemory); + find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); + find_jobs_stmt.setString(index++, host.getName()); + find_jobs_stmt.setInt(index++, numJobs * 10); + return find_jobs_stmt; + } + }, PKJOB_MAPPER)); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs nogpu findByShowQuery", + System.currentTimeMillis() - lastTime); + } else { + result.addAll(getJdbcTemplate().query(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) + throws SQLException { + String query = + handleInClause("str_os", findByShowQuery(), host.getOs().length); + PreparedStatement find_jobs_stmt = conn.prepareStatement(query); + int index = 1; + find_jobs_stmt.setString(index++, s.getShowId()); + find_jobs_stmt.setString(index++, host.getFacilityId()); + for (String item : host.getOs()) { + find_jobs_stmt.setString(index++, item); + } + find_jobs_stmt.setInt(index++, host.idleCores); + find_jobs_stmt.setLong(index++, host.idleMemory); + find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); + find_jobs_stmt.setInt(index++, host.idleGpus); + find_jobs_stmt.setLong(index++, (host.idleGpuMemory > 0) ? 1 : 0); + find_jobs_stmt.setLong(index++, host.idleGpuMemory); + find_jobs_stmt.setString(index++, host.getName()); + find_jobs_stmt.setInt(index++, numJobs * 10); + return find_jobs_stmt; + } + }, PKJOB_MAPPER)); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs findByShowQuery", + System.currentTimeMillis() - lastTime); + } + + // Collect metrics + prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); + if (result.size() < 1) { + if (host.gpuMemory == 0) { + s.skip(host.tags, host.idleCores, host.idleMemory); + } + } else { + return result; } - find_jobs_stmt.setInt(index++, host.idleCores); - find_jobs_stmt.setLong(index++, host.idleMemory); - find_jobs_stmt.setInt(index++, threadMode(host.threadMode)); - find_jobs_stmt.setInt(index++, host.idleGpus); - find_jobs_stmt.setLong(index++, (host.idleGpuMemory > 0) ? 
1 : 0); - find_jobs_stmt.setLong(index++, host.idleGpuMemory); - find_jobs_stmt.setString(index++, host.getName()); - find_jobs_stmt.setInt(index++, numJobs * 10); - return find_jobs_stmt; - } - }, PKJOB_MAPPER)); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs findByShowQuery", - System.currentTimeMillis() - lastTime); - } - - // Collect metrics - prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); - if (result.size() < 1) { - if (host.gpuMemory == 0) { - s.skip(host.tags, host.idleCores, host.idleMemory); } - } else { + prometheusMetrics.setBookingDurationMetric("findDispatchJobs show loop", + System.currentTimeMillis() - loopTime); return result; - } + } - prometheusMetrics.setBookingDurationMetric("findDispatchJobs show loop", - System.currentTimeMillis() - loopTime); - return result; - - } - - private String findByShowQuery() { - switch (schedulingMode) { - case PRIORITY_ONLY: - return FIND_JOBS_BY_SHOW_PRIORITY_MODE; - case FIFO: - return FIND_JOBS_BY_SHOW_FIFO_MODE; - case BALANCED: - return FIND_JOBS_BY_SHOW; - default: - return FIND_JOBS_BY_SHOW_PRIORITY_MODE; + + private String findByShowQuery() { + switch (schedulingMode) { + case PRIORITY_ONLY: + return FIND_JOBS_BY_SHOW_PRIORITY_MODE; + case FIFO: + return FIND_JOBS_BY_SHOW_FIFO_MODE; + case BALANCED: + return FIND_JOBS_BY_SHOW; + default: + return FIND_JOBS_BY_SHOW_PRIORITY_MODE; + } } - } - - private String findByGroupQuery() { - switch (schedulingMode) { - case PRIORITY_ONLY: - return FIND_JOBS_BY_GROUP_PRIORITY_MODE; - case FIFO: - return FIND_JOBS_BY_GROUP_FIFO_MODE; - case BALANCED: - return FIND_JOBS_BY_GROUP_BALANCED_MODE; - default: - return FIND_JOBS_BY_GROUP_PRIORITY_MODE; + + private String findByGroupQuery() { + switch (schedulingMode) { + case PRIORITY_ONLY: + return FIND_JOBS_BY_GROUP_PRIORITY_MODE; + case FIFO: + return FIND_JOBS_BY_GROUP_FIFO_MODE; + case BALANCED: + return FIND_JOBS_BY_GROUP_BALANCED_MODE; + default: + return FIND_JOBS_BY_GROUP_PRIORITY_MODE; + } } - } - - @Override - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { - return findDispatchJobs(host, numJobs, true); - } - - @Override - public Set findDispatchJobs(DispatchHost host, int numJobs) { - return findDispatchJobs(host, numJobs, false); - } - - @Override - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { - LinkedHashSet result = new LinkedHashSet(5); - long lastTime = System.currentTimeMillis(); - - if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { - String query = handleInClause("str_os", FIND_JOBS_BY_GROUP_NO_GPU, host.getOs().length); - ArrayList args = new ArrayList(); - - args.add(g.getGroupId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.getName()); - args.add(50); - result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group nogpu query", - System.currentTimeMillis() - lastTime); - } else { - String query = handleInClause("str_os", findByGroupQuery(), host.getOs().length); - ArrayList args = new ArrayList(); - - args.add(g.getGroupId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.idleGpus); - args.add(host.idleGpuMemory > 0 ? 
1 : 0); - args.add(host.idleGpuMemory); - args.add(host.getName()); - args.add(50); - result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group query", - System.currentTimeMillis() - lastTime); + + @Override + public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { + return findDispatchJobs(host, numJobs, true); } - return result; - } - - @Override - public List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit) { - long lastTime = System.currentTimeMillis(); - List frames; - if (proc.isLocalDispatch) { - frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, proc.gpuMemoryReserved, - job.getJobId(), limit); - } else { - frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_JOB_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, - proc.gpusReserved, (proc.gpuMemoryReserved > 0) ? 1 : 0, proc.gpuMemoryReserved, - job.getJobId(), proc.hostName, job.getJobId(), limit); + + @Override + public Set findDispatchJobs(DispatchHost host, int numJobs) { + return findDispatchJobs(host, numJobs, false); } - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and proc query", - System.currentTimeMillis() - lastTime); + @Override + public Set findDispatchJobs(DispatchHost host, GroupInterface g) { + LinkedHashSet result = new LinkedHashSet(5); + long lastTime = System.currentTimeMillis(); - return frames; - } + if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { + String query = handleInClause("str_os", FIND_JOBS_BY_GROUP_NO_GPU, host.getOs().length); + ArrayList args = new ArrayList(); - @Override - public List findNextDispatchFrames(JobInterface job, DispatchHost host, - int limit) { - long lastTime = System.currentTimeMillis(); - List frames; + args.add(g.getGroupId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.getName()); + args.add(50); + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group nogpu query", + System.currentTimeMillis() - lastTime); + } else { + String query = handleInClause("str_os", findByGroupQuery(), host.getOs().length); + ArrayList args = new ArrayList(); + + args.add(g.getGroupId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.idleGpus); + args.add(host.idleGpuMemory > 0 ? 
1 : 0); + args.add(host.idleGpuMemory); + args.add(host.getName()); + args.add(50); + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by group query", + System.currentTimeMillis() - lastTime); + } + return result; + } + + @Override + public List findNextDispatchFrames(JobInterface job, VirtualProc proc, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; + if (proc.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, proc.gpuMemoryReserved, + job.getJobId(), limit); + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_JOB_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, + proc.gpusReserved, (proc.gpuMemoryReserved > 0) ? 1 : 0, proc.gpuMemoryReserved, + job.getJobId(), proc.hostName, job.getJobId(), limit); + } - if (host.isLocalDispatch) { - frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleMemory, host.idleGpuMemory, job.getJobId(), - limit); + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and proc query", + System.currentTimeMillis() - lastTime); - } else { - frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_JOB_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, - threadMode(host.threadMode), host.idleGpus, (host.idleGpuMemory > 0) ? 1 : 0, - host.idleGpuMemory, job.getJobId(), host.getName(), job.getJobId(), limit); + return frames; } - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and host query", - System.currentTimeMillis() - lastTime); - - return frames; - } - - @Override - public List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, - int limit) { - long lastTime = System.currentTimeMillis(); - List frames; - - if (proc.isLocalDispatch) { - frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, proc.gpuMemoryReserved, - layer.getLayerId(), limit); - } else { - frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, - proc.gpusReserved, proc.gpuMemoryReserved, layer.getLayerId(), layer.getLayerId(), - proc.hostName, limit); + + @Override + public List findNextDispatchFrames(JobInterface job, DispatchHost host, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; + + if (host.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleMemory, host.idleGpuMemory, + job.getJobId(), limit); + + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_JOB_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, + threadMode(host.threadMode), host.idleGpus, (host.idleGpuMemory > 0) ? 
1 : 0, + host.idleGpuMemory, job.getJobId(), host.getName(), job.getJobId(), limit); + } + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by job and host query", + System.currentTimeMillis() - lastTime); + + return frames; } - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and proc query", - System.currentTimeMillis() - lastTime); + @Override + public List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; + + if (proc.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, proc.gpuMemoryReserved, + layer.getLayerId(), limit); + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, + proc.gpusReserved, proc.gpuMemoryReserved, layer.getLayerId(), + layer.getLayerId(), proc.hostName, limit); + } - return frames; - } + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and proc query", + System.currentTimeMillis() - lastTime); - @Override - public List findNextDispatchFrames(LayerInterface layer, DispatchHost host, - int limit) { - long lastTime = System.currentTimeMillis(); - List frames; + return frames; + } + + @Override + public List findNextDispatchFrames(LayerInterface layer, DispatchHost host, + int limit) { + long lastTime = System.currentTimeMillis(); + List frames; + + if (host.isLocalDispatch) { + frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleMemory, host.idleGpuMemory, + layer.getLayerId(), limit); + + } else { + frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST, + FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, + threadMode(host.threadMode), host.idleGpus, host.idleGpuMemory, + layer.getLayerId(), layer.getLayerId(), host.getName(), limit); + } - if (host.isLocalDispatch) { - frames = getJdbcTemplate().query(FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleMemory, host.idleGpuMemory, - layer.getLayerId(), limit); + prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and host query", + System.currentTimeMillis() - lastTime); - } else { - frames = getJdbcTemplate().query(FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, - threadMode(host.threadMode), host.idleGpus, host.idleGpuMemory, layer.getLayerId(), - layer.getLayerId(), host.getName(), limit); + return frames; } - prometheusMetrics.setBookingDurationMetric("findNextDispatchFrames by layer and host query", - System.currentTimeMillis() - lastTime); - - return frames; - } - - @Override - public DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc) { - return findNextDispatchFrames(job, proc, 1).get(0); - } - - @Override - public DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host) { - return findNextDispatchFrames(job, host, 1).get(0); - } - - @Override - public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { - long start = System.currentTimeMillis(); - try { - return getJdbcTemplate().queryForObject(FIND_UNDER_PROCED_JOB_BY_FACILITY, Integer.class, - excludeJob.getShowId(), proc.getFacilityId(), proc.os, excludeJob.getShowId(), - proc.getFacilityId(), 
proc.os, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, - proc.gpuMemoryReserved, proc.hostName) > 0; - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - return false; - } finally { - prometheusMetrics.setBookingDurationMetric("findUnderProcedJob query", - System.currentTimeMillis() - start); + @Override + public DispatchFrame findNextDispatchFrame(JobInterface job, VirtualProc proc) { + return findNextDispatchFrames(job, proc, 1).get(0); } - } - - @Override - public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { - long start = System.currentTimeMillis(); - try { - return getJdbcTemplate().queryForObject(HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS, Boolean.class, - baseJob.priority, proc.getFacilityId(), proc.os, proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.hostName); - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - return false; - } finally { - prometheusMetrics.setBookingDurationMetric("higherPriorityJobExists query", - System.currentTimeMillis() - start); + + @Override + public DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host) { + return findNextDispatchFrames(job, host, 1).get(0); } - } - - @Override - public Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { - LinkedHashSet result = new LinkedHashSet(numJobs); - long start = System.currentTimeMillis(); - if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { - String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); - ArrayList args = new ArrayList(); - args.add(show.getShowId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.getName()); - args.add(numJobs * 10); - - result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); - - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show nogpu query", - System.currentTimeMillis() - start); - } else { - String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); - ArrayList args = new ArrayList(); - args.add(show.getShowId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); - } - args.add(host.idleCores); - args.add(host.idleMemory); - args.add(threadMode(host.threadMode)); - args.add(host.idleGpus); - args.add(host.idleGpuMemory > 0 ? 
1 : 0); - args.add(host.idleGpuMemory); - args.add(host.getName()); - args.add(numJobs * 10); - - result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); - - prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show query", - System.currentTimeMillis() - start); + + @Override + public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { + long start = System.currentTimeMillis(); + try { + return getJdbcTemplate().queryForObject(FIND_UNDER_PROCED_JOB_BY_FACILITY, + Integer.class, excludeJob.getShowId(), proc.getFacilityId(), proc.os, + excludeJob.getShowId(), proc.getFacilityId(), proc.os, proc.coresReserved, + proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.hostName) > 0; + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + return false; + } finally { + prometheusMetrics.setBookingDurationMetric("findUnderProcedJob query", + System.currentTimeMillis() - start); + } } - // Collect metrics - prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); - return result; - } - - @Override - public Set findLocalDispatchJobs(DispatchHost host) { - LinkedHashSet result = new LinkedHashSet(5); - long start = System.currentTimeMillis(); - - String query = handleInClause("str_os", FIND_JOBS_BY_LOCAL, host.getOs().length); - ArrayList args = new ArrayList(); - args.add(host.getHostId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); + @Override + public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { + long start = System.currentTimeMillis(); + try { + return getJdbcTemplate().queryForObject(HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS, + Boolean.class, baseJob.priority, proc.getFacilityId(), proc.os, + proc.getFacilityId(), proc.os, proc.coresReserved, proc.memoryReserved, + proc.gpusReserved, proc.gpuMemoryReserved, proc.hostName); + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + return false; + } finally { + prometheusMetrics.setBookingDurationMetric("higherPriorityJobExists query", + System.currentTimeMillis() - start); + } } - args.add(host.getHostId()); - args.add(host.getFacilityId()); - for (String item : host.getOs()) { - args.add(item); + + @Override + public Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { + LinkedHashSet result = new LinkedHashSet(numJobs); + long start = System.currentTimeMillis(); + if (host.idleGpus == 0 && (schedulingMode == SchedulingMode.BALANCED)) { + String query = handleInClause("str_os", FIND_JOBS_BY_SHOW_NO_GPU, host.getOs().length); + ArrayList args = new ArrayList(); + args.add(show.getShowId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.getName()); + args.add(numJobs * 10); + + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show nogpu query", + System.currentTimeMillis() - start); + } else { + String query = handleInClause("str_os", findByShowQuery(), host.getOs().length); + ArrayList args = new ArrayList(); + args.add(show.getShowId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.idleCores); + args.add(host.idleMemory); + args.add(threadMode(host.threadMode)); + args.add(host.idleGpus); + args.add(host.idleGpuMemory > 0 ? 
1 : 0); + args.add(host.idleGpuMemory); + args.add(host.getName()); + args.add(numJobs * 10); + + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + + prometheusMetrics.setBookingDurationMetric("findDispatchJobs by show query", + System.currentTimeMillis() - start); + } + + // Collect metrics + prometheusMetrics.incrementFindJobsByShowQueryCountMetric(); + return result; } - result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); + @Override + public Set findLocalDispatchJobs(DispatchHost host) { + LinkedHashSet result = new LinkedHashSet(5); + long start = System.currentTimeMillis(); + + String query = handleInClause("str_os", FIND_JOBS_BY_LOCAL, host.getOs().length); + ArrayList args = new ArrayList(); + args.add(host.getHostId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } + args.add(host.getHostId()); + args.add(host.getFacilityId()); + for (String item : host.getOs()) { + args.add(item); + } - prometheusMetrics.setBookingDurationMetric("findLocalDispatchJobs query", - System.currentTimeMillis() - start); - return result; - } + result.addAll(getJdbcTemplate().query(query, PKJOB_MAPPER, args.toArray())); - @Override - public void clearCache() { - bookableShows.clear(); - } + prometheusMetrics.setBookingDurationMetric("findLocalDispatchJobs query", + System.currentTimeMillis() - start); + return result; + } + + @Override + public void clearCache() { + bookableShows.clear(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java index 3114798ef..7351700ae 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FacilityDaoJdbc.java @@ -28,52 +28,53 @@ public class FacilityDaoJdbc extends JdbcDaoSupport implements FacilityDao { - public static final RowMapper FACILITY_MAPPER = - new RowMapper() { - public FacilityInterface mapRow(ResultSet rs, int rowNum) throws SQLException { - FacilityEntity facility = new FacilityEntity(); - facility.id = rs.getString("pk_facility"); - facility.name = rs.getString("str_name"); - return facility; - } - }; - - public FacilityInterface getDefaultFacility() { - return getJdbcTemplate().queryForObject( - "SELECT pk_facility,str_name FROM facility WHERE b_default=true LIMIT 1", FACILITY_MAPPER); - } - - public FacilityInterface getFacility(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_facility, str_name FROM facility WHERE pk_facility=? " + "OR str_name=?", - FACILITY_MAPPER, id, id); - } - - public boolean facilityExists(String name) { - return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM facility WHERE str_name=?", - Integer.class, name) > 0; - - } - - public FacilityInterface insertFacility(FacilityEntity facility) { - facility.id = SqlUtil.genKeyRandom(); - - getJdbcTemplate().update("INSERT INTO facility (pk_facility, str_name) VALUES (?,?)", - facility.getId(), facility.getName()); - - return facility; - } - - @Override - public int deleteFacility(FacilityInterface facility) { - return getJdbcTemplate().update("DELETE FROM facility WHERE pk_facility = ?", - facility.getFacilityId()); - } - - @Override - public int updateFacilityName(FacilityInterface facility, String name) { - return getJdbcTemplate().update("UPDATE facility SET str_name=? 
WHERE pk_facility = ?", name, - facility.getFacilityId()); - } + public static final RowMapper FACILITY_MAPPER = + new RowMapper() { + public FacilityInterface mapRow(ResultSet rs, int rowNum) throws SQLException { + FacilityEntity facility = new FacilityEntity(); + facility.id = rs.getString("pk_facility"); + facility.name = rs.getString("str_name"); + return facility; + } + }; + + public FacilityInterface getDefaultFacility() { + return getJdbcTemplate().queryForObject( + "SELECT pk_facility,str_name FROM facility WHERE b_default=true LIMIT 1", + FACILITY_MAPPER); + } + + public FacilityInterface getFacility(String id) { + return getJdbcTemplate().queryForObject( + "SELECT pk_facility, str_name FROM facility WHERE pk_facility=? " + "OR str_name=?", + FACILITY_MAPPER, id, id); + } + + public boolean facilityExists(String name) { + return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM facility WHERE str_name=?", + Integer.class, name) > 0; + + } + + public FacilityInterface insertFacility(FacilityEntity facility) { + facility.id = SqlUtil.genKeyRandom(); + + getJdbcTemplate().update("INSERT INTO facility (pk_facility, str_name) VALUES (?,?)", + facility.getId(), facility.getName()); + + return facility; + } + + @Override + public int deleteFacility(FacilityInterface facility) { + return getJdbcTemplate().update("DELETE FROM facility WHERE pk_facility = ?", + facility.getFacilityId()); + } + + @Override + public int updateFacilityName(FacilityInterface facility, String name) { + return getJdbcTemplate().update("UPDATE facility SET str_name=? WHERE pk_facility = ?", + name, facility.getFacilityId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java index 5e4043227..0d74793d4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java @@ -41,112 +41,113 @@ */ public class FilterDaoJdbc extends JdbcDaoSupport implements FilterDao { - private static final String GET_FILTER = "SELECT " + "filter.* " + "FROM " + "filter "; - - private static final String GET_ACTIVE_FILTERS = "SELECT " + "filter.* " + "FROM " + "filter " - + "WHERE " + "b_enabled = true " + "AND " + "pk_show=? " + "ORDER BY " + "f_order ASC"; - - private static final String GET_FILTERS = "SELECT " + "filter.* " + "FROM " + "filter " + "WHERE " - + "pk_show=? " + "ORDER BY " + "f_order ASC"; - - public static final RowMapper FILTER_DETAIL_MAPPER = new RowMapper() { - public FilterEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - FilterEntity d = new FilterEntity(); - d.type = FilterType.valueOf(rs.getString("str_type")); - d.id = rs.getString("pk_filter"); - d.name = rs.getString("str_name"); - d.showId = rs.getString("pk_show"); - d.enabled = rs.getBoolean("b_enabled"); - d.order = rs.getFloat("f_order"); - return d; + private static final String GET_FILTER = "SELECT " + "filter.* " + "FROM " + "filter "; + + private static final String GET_ACTIVE_FILTERS = "SELECT " + "filter.* " + "FROM " + "filter " + + "WHERE " + "b_enabled = true " + "AND " + "pk_show=? " + "ORDER BY " + "f_order ASC"; + + private static final String GET_FILTERS = "SELECT " + "filter.* " + "FROM " + "filter " + + "WHERE " + "pk_show=? 
" + "ORDER BY " + "f_order ASC"; + + public static final RowMapper FILTER_DETAIL_MAPPER = + new RowMapper() { + public FilterEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + FilterEntity d = new FilterEntity(); + d.type = FilterType.valueOf(rs.getString("str_type")); + d.id = rs.getString("pk_filter"); + d.name = rs.getString("str_name"); + d.showId = rs.getString("pk_show"); + d.enabled = rs.getBoolean("b_enabled"); + d.order = rs.getFloat("f_order"); + return d; + } + }; + + public List getActiveFilters(ShowInterface show) { + return getJdbcTemplate().query(GET_ACTIVE_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); + } + + public List getFilters(ShowInterface show) { + return getJdbcTemplate().query(GET_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); + } + + public void deleteFilter(FilterInterface f) { + getJdbcTemplate().update("DELETE FROM action WHERE pk_filter=?", f.getFilterId()); + getJdbcTemplate().update("DELETE FROM matcher WHERE pk_filter=?", f.getFilterId()); + getJdbcTemplate().update("DELETE FROM filter WHERE pk_filter=?", f.getFilterId()); + reorderFilters(f); + } + + private static final String INSERT_FILTER = "INSERT INTO " + "filter " + "(" + "pk_filter," + + "pk_show," + "str_name," + "str_type," + "f_order " + + ") VALUES (?,?,?,?,(SELECT COALESCE(MAX(f_order)+1,1) FROM filter WHERE pk_show=?))"; + + public void insertFilter(FilterEntity f) { + f.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_FILTER, f.id, f.getShowId(), f.name, f.type.toString(), + f.getShowId()); + reorderFilters(f); + } + + public void updateSetFilterEnabled(FilterInterface f, boolean enabled) { + getJdbcTemplate().update("UPDATE filter SET b_enabled=? WHERE pk_filter=?", enabled, + f.getFilterId()); + } + + public void updateSetFilterName(FilterInterface f, String name) { + getJdbcTemplate().update("UPDATE filter SET str_name=? WHERE pk_filter=?", name, + f.getFilterId()); + } + + public void updateSetFilterOrder(FilterInterface f, double order) { + getJdbcTemplate().update("UPDATE filter SET f_order=? - 0.1 WHERE pk_filter=?", order, + f.getFilterId()); + reorderFilters(f); + } + + public void lowerFilterOrder(FilterInterface f, int by) { + double lower_by = by + 0.1; + getJdbcTemplate().update("UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", + lower_by, f.getFilterId()); + reorderFilters(f); + } + + public void raiseFilterOrder(FilterInterface f, int by) { + double raise_by = (by * -1) - 0.1; + getJdbcTemplate().update("UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", + raise_by, f.getFilterId()); + reorderFilters(f); + } + + public void updateSetFilterType(FilterInterface f, FilterType type) { + getJdbcTemplate().update("UPDATE filter SET str_type=? WHERE pk_filter=?", type.toString(), + f.getFilterId()); + } + + public void reorderFilters(final ShowInterface s) { + getJdbcTemplate().update("LOCK TABLE filter IN SHARE MODE"); + getJdbcTemplate().call(new CallableStatementCreator() { + + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call reorder_filters(?) }"); + c.setString(1, s.getShowId()); + return c; + } + }, new ArrayList()); + } + + public FilterEntity findFilter(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_show=? 
AND str_name=?", + FILTER_DETAIL_MAPPER, show.getShowId(), name); + } + + public FilterEntity getFilter(String id) { + return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_filter=?", + FILTER_DETAIL_MAPPER, id); + } + + public FilterEntity getFilter(FilterInterface filter) { + return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_filter=?", + FILTER_DETAIL_MAPPER, filter.getFilterId()); } - }; - - public List getActiveFilters(ShowInterface show) { - return getJdbcTemplate().query(GET_ACTIVE_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); - } - - public List getFilters(ShowInterface show) { - return getJdbcTemplate().query(GET_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); - } - - public void deleteFilter(FilterInterface f) { - getJdbcTemplate().update("DELETE FROM action WHERE pk_filter=?", f.getFilterId()); - getJdbcTemplate().update("DELETE FROM matcher WHERE pk_filter=?", f.getFilterId()); - getJdbcTemplate().update("DELETE FROM filter WHERE pk_filter=?", f.getFilterId()); - reorderFilters(f); - } - - private static final String INSERT_FILTER = "INSERT INTO " + "filter " + "(" + "pk_filter," - + "pk_show," + "str_name," + "str_type," + "f_order " - + ") VALUES (?,?,?,?,(SELECT COALESCE(MAX(f_order)+1,1) FROM filter WHERE pk_show=?))"; - - public void insertFilter(FilterEntity f) { - f.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_FILTER, f.id, f.getShowId(), f.name, f.type.toString(), - f.getShowId()); - reorderFilters(f); - } - - public void updateSetFilterEnabled(FilterInterface f, boolean enabled) { - getJdbcTemplate().update("UPDATE filter SET b_enabled=? WHERE pk_filter=?", enabled, - f.getFilterId()); - } - - public void updateSetFilterName(FilterInterface f, String name) { - getJdbcTemplate().update("UPDATE filter SET str_name=? WHERE pk_filter=?", name, - f.getFilterId()); - } - - public void updateSetFilterOrder(FilterInterface f, double order) { - getJdbcTemplate().update("UPDATE filter SET f_order=? - 0.1 WHERE pk_filter=?", order, - f.getFilterId()); - reorderFilters(f); - } - - public void lowerFilterOrder(FilterInterface f, int by) { - double lower_by = by + 0.1; - getJdbcTemplate().update("UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", lower_by, - f.getFilterId()); - reorderFilters(f); - } - - public void raiseFilterOrder(FilterInterface f, int by) { - double raise_by = (by * -1) - 0.1; - getJdbcTemplate().update("UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", raise_by, - f.getFilterId()); - reorderFilters(f); - } - - public void updateSetFilterType(FilterInterface f, FilterType type) { - getJdbcTemplate().update("UPDATE filter SET str_type=? WHERE pk_filter=?", type.toString(), - f.getFilterId()); - } - - public void reorderFilters(final ShowInterface s) { - getJdbcTemplate().update("LOCK TABLE filter IN SHARE MODE"); - getJdbcTemplate().call(new CallableStatementCreator() { - - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call reorder_filters(?) }"); - c.setString(1, s.getShowId()); - return c; - } - }, new ArrayList()); - } - - public FilterEntity findFilter(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_show=? 
AND str_name=?", - FILTER_DETAIL_MAPPER, show.getShowId(), name); - } - - public FilterEntity getFilter(String id) { - return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_filter=?", FILTER_DETAIL_MAPPER, - id); - } - - public FilterEntity getFilter(FilterInterface filter) { - return getJdbcTemplate().queryForObject(GET_FILTER + " WHERE pk_filter=?", FILTER_DETAIL_MAPPER, - filter.getFilterId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java index b4a4de4fa..92a2846b5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java @@ -55,768 +55,790 @@ public class FrameDaoJdbc extends JdbcDaoSupport implements FrameDao { - private static final String UPDATE_FRAME_STOPPED_NORSS = "UPDATE " + "frame " + "SET " - + "str_state=?, " + "int_exit_status = ?, " + "ts_stopped = current_timestamp, " - + "ts_updated = current_timestamp, " + "int_version = int_version + 1, " - + "int_total_past_core_time = int_total_past_core_time + " - + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_cores / 100)," - + "int_total_past_gpu_time = int_total_past_gpu_time + " - + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_gpus) " + "WHERE " - + "frame.pk_frame = ? " + "AND " + "frame.str_state = ? " + "AND " + "frame.int_version = ? "; - - @Override - public boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus) { - return getJdbcTemplate().update(UPDATE_FRAME_STOPPED_NORSS, state.toString(), exitStatus, - frame.getFrameId(), FrameState.RUNNING.toString(), frame.getVersion()) == 1; - } - - private static final String UPDATE_FRAME_STOPPED = "UPDATE " + "frame " + "SET " + "str_state=?, " - + "int_exit_status = ?, " + "ts_stopped = current_timestamp + interval '1' second, " - + "ts_updated = current_timestamp, " + "int_mem_max_used = ?, " - + "int_version = int_version + 1, " + "int_total_past_core_time = int_total_past_core_time + " - + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_cores / 100), " - + "int_total_past_gpu_time = int_total_past_gpu_time + " - + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_gpus) " - + "WHERE " + "frame.pk_frame = ? " + "AND " + "frame.str_state = ? " + "AND " - + "frame.int_version = ? "; - - @Override - public boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, - long maxRss) { - - return getJdbcTemplate().update(UPDATE_FRAME_STOPPED, state.toString(), exitStatus, maxRss, - frame.getFrameId(), FrameState.RUNNING.toString(), frame.getVersion()) == 1; - } - - private static final String UPDATE_FRAME_REASON = "UPDATE " + "frame " + "SET " - + "str_state = ?, " + "int_exit_status = ?, " + "ts_stopped = current_timestamp, " - + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " - + "frame.pk_frame = ? 
" + "AND " + "frame.pk_frame NOT IN " + "(SELECT proc.pk_frame FROM " - + "proc WHERE proc.pk_frame=?)"; - - private int updateFrame(FrameInterface frame, int exitStatus) { - - int result = getJdbcTemplate().update(UPDATE_FRAME_REASON, FrameState.WAITING.toString(), - exitStatus, frame.getFrameId(), frame.getFrameId()); - - return result; - } - - @Override - public boolean updateFrameHostDown(FrameInterface frame) { - return updateFrame(frame, Dispatcher.EXIT_STATUS_DOWN_HOST) > 0; - } - - @Override - public boolean updateFrameCleared(FrameInterface frame) { - return updateFrame(frame, Dispatcher.EXIT_STATUS_FRAME_CLEARED) > 0; - } - - private static final String UPDATE_FRAME_MEMORY_ERROR = - "UPDATE " + "frame " + "SET " + "int_exit_status = ?, " + "int_version = int_version + 1 " - + "WHERE " + "frame.pk_frame = ? "; - - @Override - public boolean updateFrameMemoryError(FrameInterface frame) { - int result = getJdbcTemplate().update(UPDATE_FRAME_MEMORY_ERROR, - Dispatcher.EXIT_STATUS_MEMORY_FAILURE, frame.getFrameId()); - - return result > 0; - } - - private static final String UPDATE_FRAME_STARTED = "UPDATE " + "frame " + "SET " - + "str_state = ?, " + "str_host = ?, " + "int_cores = ?, " + "int_mem_reserved = ?, " - + "int_gpus = ?, " + "int_gpu_mem_reserved = ?, " + "ts_updated = current_timestamp, " - + "ts_started = current_timestamp, " + "ts_stopped = null, " - + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " + "AND " + "str_state = ? " - + "AND " + "int_version = ? " + "AND " + "frame.pk_layer IN (" + "SELECT " + "layer.pk_layer " - + "FROM " + "layer " + "LEFT JOIN layer_limit ON layer_limit.pk_layer = layer.pk_layer " - + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " - + "LEFT JOIN (" + "SELECT " + "limit_record.pk_limit_record, " - + "SUM(layer_stat.int_running_count) AS int_sum_running " + "FROM " + "layer_limit " - + "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " - + "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " - + "GROUP BY limit_record.pk_limit_record) AS sum_running " - + "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + "WHERE " - + "sum_running.int_sum_running < limit_record.int_max_value " - + "OR sum_running.int_sum_running IS NULL " + ")"; - - private static final String UPDATE_FRAME_RETRIES = - "UPDATE " + "frame " + "SET " + "int_retries = int_retries + 1 " + "WHERE " + "pk_frame = ? " - + "AND " + "int_exit_status NOT IN (?,?,?,?,?,?,?) "; - - @Override - public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { - - lockFrameForUpdate(frame, FrameState.WAITING); - - try { - int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, FrameState.RUNNING.toString(), - proc.hostName, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, - proc.gpuMemoryReserved, frame.getFrameId(), FrameState.WAITING.toString(), - frame.getVersion()); - if (result == 0) { - String error_msg = "the frame " + frame + " was updated by another thread."; - throw new FrameReservationException(error_msg); - } - } catch (DataAccessException e) { - /* - * This usually happens when the folder's max cores limit has exceeded - */ - throw new FrameReservationException(e.getCause()); - } - - /* - * Frames that were killed via nimby or hardware errors not attributed to the software do not - * increment the retry counter. Like failed launch, orphaned frame, failed kill or down host. 
- */ - try { - getJdbcTemplate().update(UPDATE_FRAME_RETRIES, frame.getFrameId(), -1, - FrameExitStatus.SKIP_RETRY_VALUE, FrameExitStatus.FAILED_LAUNCH_VALUE, - Dispatcher.EXIT_STATUS_FRAME_CLEARED, Dispatcher.EXIT_STATUS_FRAME_ORPHAN, - Dispatcher.EXIT_STATUS_FAILED_KILL, Dispatcher.EXIT_STATUS_DOWN_HOST); - } catch (DataAccessException e) { - throw new FrameReservationException(e.getCause()); - } - } - - private static final String UPDATE_FRAME_FIXED = - "UPDATE " + "frame " + "SET " + "str_state = ?," + "str_host=?, " + "int_cores=?, " - + "int_mem_reserved = ?, " + "int_gpus = ?, " + "int_gpu_mem_reserved = ?, " - + "ts_updated = current_timestamp, " + "ts_started = current_timestamp, " - + "ts_stopped = null, " + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " - + "AND " + "str_state = 'RUNNING'"; - - @Override - public boolean updateFrameFixed(VirtualProc proc, FrameInterface frame) { - return getJdbcTemplate().update(UPDATE_FRAME_FIXED, FrameState.RUNNING.toString(), - proc.hostName, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, - proc.gpuMemoryReserved, frame.getFrameId()) == 1; - } - - @Override - public DispatchFrame getDispatchFrame(String uuid) { - return getJdbcTemplate().queryForObject(GET_DISPATCH_FRAME, DISPATCH_FRAME_MAPPER, uuid); - } - - static final RowMapper DISPATCH_FRAME_MAPPER = new RowMapper() { - public DispatchFrame mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchFrame frame = new DispatchFrame(); - frame.id = rs.getString("pk_frame"); - frame.name = rs.getString("frame_name"); - frame.layerId = rs.getString("pk_layer"); - frame.jobId = rs.getString("pk_job"); - frame.showId = rs.getString("pk_show"); - frame.facilityId = rs.getString("pk_facility"); - frame.retries = rs.getInt("int_retries"); - frame.state = FrameState.valueOf(rs.getString("frame_state")); - frame.command = rs.getString("str_cmd"); - frame.jobName = rs.getString("job_name"); - frame.layerName = rs.getString("layer_name"); - frame.chunkSize = rs.getInt("int_chunk_size"); - frame.range = rs.getString("str_range"); - frame.logDir = rs.getString("str_log_dir"); - frame.shot = rs.getString("str_shot"); - frame.show = rs.getString("show_name"); - frame.owner = rs.getString("str_user"); - int uid = rs.getInt("int_uid"); - frame.uid = rs.wasNull() ? 
Optional.empty() : Optional.of(uid); - frame.state = FrameState.valueOf(rs.getString("frame_state")); - frame.minCores = rs.getInt("int_cores_min"); - frame.maxCores = rs.getInt("int_cores_max"); - frame.threadable = rs.getBoolean("b_threadable"); - frame.setMinMemory(rs.getLong("int_mem_min")); - frame.minGpus = rs.getInt("int_gpus_min"); - frame.maxGpus = rs.getInt("int_gpus_max"); - frame.minGpuMemory = rs.getLong("int_gpu_mem_min"); - frame.version = rs.getInt("int_version"); - frame.services = rs.getString("str_services"); - frame.os = rs.getString("str_os"); - return frame; - } - }; - - private static final String GET_DISPATCH_FRAME = "SELECT " + "show.str_name AS show_name, " - + "job.str_name AS job_name, " + "job.pk_job," + "job.pk_show," + "job.pk_facility," - + "job.str_name," + "job.str_shot," + "job.str_user," + "job.int_uid," + "job.str_log_dir," - + "COALESCE(str_os, '') AS str_os, " + "frame.str_name AS frame_name, " - + "frame.str_state AS frame_state, " + "frame.pk_frame, " + "frame.pk_layer, " - + "frame.int_retries, " + "frame.int_version, " + "layer.str_name AS layer_name, " - + "layer.str_type AS layer_type, " + "layer.str_cmd, " + "layer.int_cores_min," - + "layer.int_cores_max," + "layer.b_threadable," + "layer.int_mem_min, " - + "layer.int_gpus_min," + "layer.int_gpus_max," + "layer.int_gpu_mem_min, " - + "layer.str_range, " + "layer.int_chunk_size, " + "layer.str_services " + "FROM " + "layer, " - + "job, " + "show, " + "frame LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + "WHERE " - + "job.pk_show = show.pk_show " + "AND " + "frame.pk_job = job.pk_job " + "AND " - + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_frame = ?"; - - private static final String GET_FRAME_DETAIL = - "SELECT " + "frame.*, " + "job.pk_facility," + "job.pk_show " + "FROM " + "frame," + "layer," - + "job," + "show " + "WHERE " + "frame.pk_job = job.pk_job " + "AND " - + "frame.pk_layer = layer.pk_layer " + "AND " + "job.pk_show = show.pk_show "; - - private static final String GET_MINIMAL_FRAME = "SELECT " + "frame.pk_frame," + "frame.str_name, " - + "frame.pk_job, " + "frame.pk_layer, " + "frame.str_state, " + "frame.int_version, " - + "job.pk_show, " + "job.pk_facility " + "FROM " + "frame," + "layer," + "job," + "show " - + "WHERE " + "frame.pk_job = job.pk_job " + "AND " + "frame.pk_layer = layer.pk_layer " - + "AND " + "job.pk_show = show.pk_show "; - - private static final RowMapper FRAME_MAPPER = new RowMapper() { - public FrameEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameEntity frame = new FrameEntity(); - frame.id = rs.getString("pk_frame"); - frame.name = rs.getString("str_name"); - frame.jobId = rs.getString("pk_job"); - frame.layerId = rs.getString("pk_layer"); - frame.showId = rs.getString("pk_show"); - frame.facilityId = rs.getString("pk_facility"); - frame.version = rs.getInt("int_version"); - return frame; - } - }; - - private static final RowMapper FRAME_DETAIL_MAPPER = new RowMapper() { - public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameDetail frame = new FrameDetail(); - frame.id = rs.getString("pk_frame"); - frame.dependCount = rs.getInt("int_depend_count"); - frame.exitStatus = rs.getInt("int_exit_status"); - frame.jobId = rs.getString("pk_job"); - frame.layerId = rs.getString("pk_layer"); - frame.showId = rs.getString("pk_show"); - frame.maxRss = rs.getLong("int_mem_max_used"); - frame.name = rs.getString("str_name"); - frame.number = rs.getInt("int_number"); - frame.dispatchOrder = 
rs.getInt("int_dispatch_order"); - frame.retryCount = rs.getInt("int_retries"); - frame.dateStarted = rs.getTimestamp("ts_started"); - frame.dateStopped = rs.getTimestamp("ts_stopped"); - frame.dateUpdated = rs.getTimestamp("ts_updated"); - frame.dateLLU = rs.getTimestamp("ts_llu"); - frame.version = rs.getInt("int_version"); - - if (rs.getString("str_host") != null) { - frame.lastResource = String.format("%s/%d/%d", rs.getString("str_host"), - rs.getInt("int_cores"), rs.getInt("int_gpus")); - } else { - frame.lastResource = ""; - } - frame.state = FrameState.valueOf(rs.getString("str_state")); - - return frame; - } - }; - - public static final String FIND_ORPHANED_FRAMES = - "SELECT " + "frame.pk_frame, " + "frame.pk_layer, " + "frame.str_name, " - + "frame.int_version, " + "job.pk_job, " + "job.pk_show, " + "job.pk_facility " + "FROM " - + "frame, " + "job " + "WHERE " + "job.pk_job = frame.pk_job " + "AND " - + "frame.str_state = 'RUNNING' " + "AND " + "job.str_state = 'PENDING' " + "AND " - + "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + "AND " - + "current_timestamp - frame.ts_updated > interval '300' second"; - - @Override - public List getOrphanedFrames() { - return getJdbcTemplate().query(FIND_ORPHANED_FRAMES, FRAME_MAPPER); - } - - private static final String IS_ORPHAN = "SELECT " + "COUNT(1) " + "FROM " + "frame " + "WHERE " - + "frame.pk_frame = ? " + "AND " + "frame.str_state = 'RUNNING' " + "AND " - + "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + "AND " - + "current_timestamp - frame.ts_updated > interval '300' second"; - - @Override - public boolean isOrphan(FrameInterface frame) { - return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, frame.getFrameId()) == 1; - } - - private static final String INSERT_FRAME = "INSERT INTO " + "frame " + "(" + "pk_frame, " - + "pk_layer, " + "pk_job, " + "str_name, " + "str_state, " + "int_number, " - + "int_dispatch_order, " + "int_layer_order, " + "ts_updated, " + "ts_llu " + ") " - + "VALUES (?,?,?,?,?,?,?,?,current_timestamp,current_timestamp)"; - - @Override - public void insertFrames(LayerDetail layer, List frames) { - - int count = 0; - for (int frame : frames) { - getJdbcTemplate().update(INSERT_FRAME, SqlUtil.genKeyRandom(), layer.getLayerId(), - layer.getJobId(), CueUtil.buildFrameName(layer, frame), FrameState.SETUP.toString(), - frame, count, layer.dispatchOrder); - count++; - } - } - - @Override - public List getDependentFrames(LightweightDependency depend) { - - /* - * Compound depends are handled in the DependManager. - */ + private static final String UPDATE_FRAME_STOPPED_NORSS = "UPDATE " + "frame " + "SET " + + "str_state=?, " + "int_exit_status = ?, " + "ts_stopped = current_timestamp, " + + "ts_updated = current_timestamp, " + "int_version = int_version + 1, " + + "int_total_past_core_time = int_total_past_core_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_cores / 100)," + + "int_total_past_gpu_time = int_total_past_gpu_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_gpus) " + "WHERE " + + "frame.pk_frame = ? " + "AND " + "frame.str_state = ? " + "AND " + + "frame.int_version = ? 
"; + + @Override + public boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus) { + return getJdbcTemplate().update(UPDATE_FRAME_STOPPED_NORSS, state.toString(), exitStatus, + frame.getFrameId(), FrameState.RUNNING.toString(), frame.getVersion()) == 1; + } - String key = null; - StringBuilder sb = new StringBuilder(4096); - sb.append(GET_MINIMAL_FRAME); - sb.append(" AND frame.int_depend_count > 0 "); - - if (EnumSet.of(DependType.JOB_ON_JOB, DependType.JOB_ON_LAYER, DependType.JOB_ON_FRAME) - .contains(depend.type)) { - sb.append("AND job.pk_job = ?"); - key = depend.dependErJobId; - } else if (EnumSet - .of(DependType.LAYER_ON_FRAME, DependType.LAYER_ON_LAYER, DependType.LAYER_ON_JOB) - .contains(depend.type)) { - sb.append("AND layer.pk_layer = ?"); - key = depend.dependErLayerId; - } else if (EnumSet - .of(DependType.FRAME_ON_JOB, DependType.FRAME_ON_LAYER, DependType.FRAME_ON_FRAME) - .contains(depend.type)) { - sb.append("AND frame.pk_frame = ?"); - key = depend.dependErFrameId; - } else { - return new ArrayList(1); - } - - return getJdbcTemplate().query(sb.toString(), FRAME_MAPPER, new Object[] {key}); - } - - @Override - public FrameInterface findFrame(LayerInterface layer, int number) { - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.pk_layer=? AND int_number=?", FRAME_MAPPER, - layer.getLayerId(), number); - } - - @Override - public FrameDetail getFrameDetail(FrameInterface frame) { - return getJdbcTemplate().queryForObject(GET_FRAME_DETAIL + " AND pk_frame=?", - FRAME_DETAIL_MAPPER, frame.getFrameId()); - } - - @Override - public FrameDetail getFrameDetail(String id) { - return getJdbcTemplate().queryForObject(GET_FRAME_DETAIL + " AND pk_frame=?", - FRAME_DETAIL_MAPPER, id); - } - - @Override - public FrameDetail findFrameDetail(JobInterface job, String name) { - // Uses C_FRAME_STR_NAME_UNQ - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND frame.str_name=? AND frame.pk_job=?", FRAME_DETAIL_MAPPER, name, - job.getJobId()); - } - - @Override - public List findFrameDetails(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_FRAME_DETAIL), FRAME_DETAIL_MAPPER, - r.getValuesArray()); - } - - @Override - public List findFrames(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_MINIMAL_FRAME), FRAME_MAPPER, - r.getValuesArray()); - } - - private static final String FIND_LONGEST_FRAME = "SELECT " + "pk_frame " + "FROM " + "frame, " - + "layer " + "WHERE " + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job = ? " - + "AND " + "str_state=? " + "AND " + "layer.str_type=? " + "ORDER BY " - + "ts_stopped - ts_started DESC " + "LIMIT 1"; - - @Override - public FrameDetail findLongestFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject(FIND_LONGEST_FRAME, String.class, - job.getJobId(), FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); - return getFrameDetail(pk_frame); - } - - private static final String FIND_SHORTEST_FRAME = "SELECT " + "pk_frame " + "FROM " + "frame, " - + "layer " + "WHERE " + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job = ? " - + "AND " + "frame.str_state = ? " + "AND " + "layer.str_type = ? 
" + "ORDER BY " - + "ts_stopped - ts_started ASC " + "LIMIT 1"; - - @Override - public FrameDetail findShortestFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject(FIND_SHORTEST_FRAME, String.class, - job.getJobId(), FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); - return getFrameDetail(pk_frame); - } - - @Override - public FrameInterface getFrame(String id) { - return getJdbcTemplate().queryForObject(GET_MINIMAL_FRAME + " AND frame.pk_frame=?", - FRAME_MAPPER, id); - } - - @Override - public FrameInterface findFrame(JobInterface job, String name) { - // Uses C_FRAME_STR_NAME_UNQ - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.str_name=? AND frame.pk_job=?", FRAME_MAPPER, name, - job.getJobId()); - } - - @Override - public void checkRetries(FrameInterface frame) { - int max_retries = getJdbcTemplate().queryForObject( - "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, frame.getJobId()); - - if (getJdbcTemplate().queryForObject("SELECT int_retries FROM frame WHERE pk_frame=?", - Integer.class, frame.getFrameId()) >= max_retries) { - getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_frame=?", - FrameState.DEAD.toString(), frame.getFrameId()); - } - } - - private static final String UPDATE_FRAME_STATE = "UPDATE " + "frame " + "SET " + "str_state = ?, " - + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " - + "pk_frame = ? " + "AND " + "int_version = ? "; - - @Override - public boolean updateFrameState(FrameInterface frame, FrameState state) { - if (getJdbcTemplate().update(UPDATE_FRAME_STATE, state.toString(), frame.getFrameId(), - frame.getVersion()) == 1) { - logger.info("The frame " + frame + " state changed to " + state.toString()); - return true; - } - logger.info("Failed to change the frame " + frame + " state to " + state.toString()); - return false; - } - - private static final String MARK_AS_WAITING = "UPDATE " + "frame " + "SET " + "str_state=?, " - + "ts_updated = current_timestamp, " + "ts_llu = current_timestamp, " - + "int_depend_count = 0, " + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " - + "AND " + "int_version = ? " + "AND " + "str_state = ? "; - - @Override - public void markFrameAsWaiting(FrameInterface frame) { - getJdbcTemplate().update(MARK_AS_WAITING, FrameState.WAITING.toString(), frame.getFrameId(), - frame.getVersion(), FrameState.DEPEND.toString()); - } - - private static final String MARK_AS_DEPEND = - "UPDATE " + "frame " + "SET " + "str_state=?, " + "int_depend_count = ?, " - + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " - + "pk_frame = ? " + "AND " + "int_version = ? " + "AND " + "str_state = ? "; - - private static final String GET_FRAME_DEPEND_COUNT = "SELECT " + "COUNT(1) " + "FROM " + "depend " - + "WHERE " + "( " + "(pk_job_depend_er = ? AND str_type LIKE 'JOB#_ON%' ESCAPE '#') " + "OR " - + "pk_layer_depend_er = ? " + "OR " + "pk_frame_depend_er = ? " + ") " + "AND " - + "depend.b_active = true " + "AND " + "depend.b_composite = false "; - - public void markFrameAsDepend(FrameInterface frame) { - // We need to full depend count in this case to reset the - // frames's depend count accurately. 
- int depend_count = getJdbcTemplate().queryForObject(GET_FRAME_DEPEND_COUNT, Integer.class, - frame.getJobId(), frame.getLayerId(), frame.getFrameId()); - - if (depend_count > 0) { - getJdbcTemplate().update(MARK_AS_DEPEND, FrameState.DEPEND.toString(), depend_count, - frame.getFrameId(), frame.getVersion(), FrameState.WAITING.toString()); - } - } - - private static final String FIND_HIGHEST_MEM_FRAME = - "SELECT " + "pk_frame " + "FROM " + "frame " + "WHERE " + "pk_job = ? " + "AND " - + "str_state = ? " + "ORDER BY " + "int_mem_max_used DESC " + "LIMIT 1"; - - @Override - public FrameDetail findHighestMemoryFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject(FIND_HIGHEST_MEM_FRAME, String.class, - job.getJobId(), FrameState.SUCCEEDED.toString()); - return getFrameDetail(pk_frame); - } - - private static final String FIND_LOWEST_MEM_FRAME = - "SELECT " + "pk_frame " + "FROM " + "frame " + "WHERE " + "pk_job = ? " + "AND " - + "str_state = ? " + "ORDER BY " + "int_mem_max_used ASC " + "LIMIT 1"; - - @Override - public FrameDetail findLowestMemoryFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject(FIND_LOWEST_MEM_FRAME, String.class, - job.getJobId(), FrameState.SUCCEEDED.toString()); - return getFrameDetail(pk_frame); - } - - @Override - public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet) { - int start; - int size = frameSet.size(); - int min = getJdbcTemplate().queryForObject( - "SELECT MIN(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, - layer.getLayerId()); - - start = min - size; - for (int frameIdx = 0; frameIdx < size; frameIdx++) { - getJdbcTemplate().update( - "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", start, - CueUtil.buildFrameName(layer, frameSet.get(frameIdx)), layer.getJobId()); - - logger.info( - "reordering " + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)) + " to " + start); - start++; - } - } - - @Override - public void reorderFramesLast(LayerInterface layer, FrameSet frameSet) { - int start; - int size = frameSet.size(); - List frames = new ArrayList<>(size); - int max = getJdbcTemplate().queryForObject( - "SELECT MAX(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, - layer.getLayerId()); - - start = max + 1; - for (int i = 0; i <= size; i++) { - frames.add(new Object[] {start + i, CueUtil.buildFrameName(layer, i), layer.getJobId()}); - } - - if (frames.size() > 0) { - getJdbcTemplate().batchUpdate( - "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", frames); - } - } - - @Override - public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet) { - - int size = frameSet.size(); - List frames = new ArrayList<>(size); - - for (int i = 0; i < size; i++) { - if (i >= size - i - 1) { - break; - } - try { - int a = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", - Integer.class, CueUtil.buildFrameName(layer, frameSet.get(i)), layer.getJobId(), - layer.getLayerId()); - - int b = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? 
AND pk_layer=?", - Integer.class, CueUtil.buildFrameName(layer, frameSet.get(size - i - 1)), - layer.getJobId(), layer.getLayerId()); - - frames.add(new Object[] {a, layer.getLayerId(), - CueUtil.buildFrameName(layer, frameSet.get(size - i - 1))}); - frames.add( - new Object[] {b, layer.getLayerId(), CueUtil.buildFrameName(layer, frameSet.get(i))}); - - } catch (Exception e) { - logger.info("frame not found while attempting to reverse layer, skipping"); - } - } - - if (frames.size() > 0) { - getJdbcTemplate().batchUpdate( - "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", frames); - } - } - - @Override - public void staggerLayer(LayerInterface layer, String frameRange, int stagger) { - - /* - * If the layer is only 1 frame we don't stagger it. - */ - if (getJdbcTemplate().queryForObject("SELECT int_total_count FROM layer_stat WHERE pk_layer=?", - Integer.class, layer.getLayerId()) == 1) { - return; + private static final String UPDATE_FRAME_STOPPED = "UPDATE " + "frame " + "SET " + + "str_state=?, " + "int_exit_status = ?, " + + "ts_stopped = current_timestamp + interval '1' second, " + + "ts_updated = current_timestamp, " + "int_mem_max_used = ?, " + + "int_version = int_version + 1, " + + "int_total_past_core_time = int_total_past_core_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_cores / 100), " + + "int_total_past_gpu_time = int_total_past_gpu_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_gpus) " + + "WHERE " + "frame.pk_frame = ? " + "AND " + "frame.str_state = ? " + "AND " + + "frame.int_version = ? "; + + @Override + public boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatus, + long maxRss) { + + return getJdbcTemplate().update(UPDATE_FRAME_STOPPED, state.toString(), exitStatus, maxRss, + frame.getFrameId(), FrameState.RUNNING.toString(), frame.getVersion()) == 1; } - logger.info("staggering: " + layer.getName() + " range: " + frameRange + " on " + stagger); + private static final String UPDATE_FRAME_REASON = "UPDATE " + "frame " + "SET " + + "str_state = ?, " + "int_exit_status = ?, " + "ts_stopped = current_timestamp, " + + "ts_updated = current_timestamp, " + "int_version = int_version + 1 " + "WHERE " + + "frame.pk_frame = ? " + "AND " + "frame.pk_frame NOT IN " + + "(SELECT proc.pk_frame FROM " + "proc WHERE proc.pk_frame=?)"; - FrameSet frameSet = null; - FrameSet range = null; + private int updateFrame(FrameInterface frame, int exitStatus) { - try { - frameSet = new FrameSet(frameRange + ":" + stagger); - range = new FrameSet(frameRange); - } catch (Exception e) { - logger.warn("failed to stagger layer: " + layer.getName() + ", " + e); - return; + int result = getJdbcTemplate().update(UPDATE_FRAME_REASON, FrameState.WAITING.toString(), + exitStatus, frame.getFrameId(), frame.getFrameId()); + + return result; } - /* - * Find the dispatch order of the first frame we're working with and base our other staggers of - * this value. - */ - int first = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? 
AND pk_layer=?", - Integer.class, CueUtil.buildFrameName(layer, range.get(0)), layer.getJobId(), - layer.getLayerId()); + @Override + public boolean updateFrameHostDown(FrameInterface frame) { + return updateFrame(frame, Dispatcher.EXIT_STATUS_DOWN_HOST) > 0; + } + + @Override + public boolean updateFrameCleared(FrameInterface frame) { + return updateFrame(frame, Dispatcher.EXIT_STATUS_FRAME_CLEARED) > 0; + } + + private static final String UPDATE_FRAME_MEMORY_ERROR = + "UPDATE " + "frame " + "SET " + "int_exit_status = ?, " + + "int_version = int_version + 1 " + "WHERE " + "frame.pk_frame = ? "; + + @Override + public boolean updateFrameMemoryError(FrameInterface frame) { + int result = getJdbcTemplate().update(UPDATE_FRAME_MEMORY_ERROR, + Dispatcher.EXIT_STATUS_MEMORY_FAILURE, frame.getFrameId()); + + return result > 0; + } + + private static final String UPDATE_FRAME_STARTED = "UPDATE " + "frame " + "SET " + + "str_state = ?, " + "str_host = ?, " + "int_cores = ?, " + "int_mem_reserved = ?, " + + "int_gpus = ?, " + "int_gpu_mem_reserved = ?, " + "ts_updated = current_timestamp, " + + "ts_started = current_timestamp, " + "ts_stopped = null, " + + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " + "AND " + + "str_state = ? " + "AND " + "int_version = ? " + "AND " + "frame.pk_layer IN (" + + "SELECT " + "layer.pk_layer " + "FROM " + "layer " + + "LEFT JOIN layer_limit ON layer_limit.pk_layer = layer.pk_layer " + + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + + "LEFT JOIN (" + "SELECT " + "limit_record.pk_limit_record, " + + "SUM(layer_stat.int_running_count) AS int_sum_running " + "FROM " + "layer_limit " + + "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + + "GROUP BY limit_record.pk_limit_record) AS sum_running " + + "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + "WHERE " + + "sum_running.int_sum_running < limit_record.int_max_value " + + "OR sum_running.int_sum_running IS NULL " + ")"; + + private static final String UPDATE_FRAME_RETRIES = + "UPDATE " + "frame " + "SET " + "int_retries = int_retries + 1 " + "WHERE " + + "pk_frame = ? " + "AND " + "int_exit_status NOT IN (?,?,?,?,?,?,?) "; + + @Override + public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { + + lockFrameForUpdate(frame, FrameState.WAITING); + + try { + int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, + FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, + proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + frame.getFrameId(), FrameState.WAITING.toString(), frame.getVersion()); + if (result == 0) { + String error_msg = "the frame " + frame + " was updated by another thread."; + throw new FrameReservationException(error_msg); + } + } catch (DataAccessException e) { + /* + * This usually happens when the folder's max cores limit has exceeded + */ + throw new FrameReservationException(e.getCause()); + } - int size = range.size(); - for (int i = 0; i < size; i++) { - int frame = range.get(i); - int newDispatchOrder = frameSet.index(frame) + first; + /* + * Frames that were killed via nimby or hardware errors not attributed to the software do + * not increment the retry counter. Like failed launch, orphaned frame, failed kill or down + * host. 
+ */ + try { + getJdbcTemplate().update(UPDATE_FRAME_RETRIES, frame.getFrameId(), -1, + FrameExitStatus.SKIP_RETRY_VALUE, FrameExitStatus.FAILED_LAUNCH_VALUE, + Dispatcher.EXIT_STATUS_FRAME_CLEARED, Dispatcher.EXIT_STATUS_FRAME_ORPHAN, + Dispatcher.EXIT_STATUS_FAILED_KILL, Dispatcher.EXIT_STATUS_DOWN_HOST); + } catch (DataAccessException e) { + throw new FrameReservationException(e.getCause()); + } + } + + private static final String UPDATE_FRAME_FIXED = + "UPDATE " + "frame " + "SET " + "str_state = ?," + "str_host=?, " + "int_cores=?, " + + "int_mem_reserved = ?, " + "int_gpus = ?, " + "int_gpu_mem_reserved = ?, " + + "ts_updated = current_timestamp, " + "ts_started = current_timestamp, " + + "ts_stopped = null, " + "int_version = int_version + 1 " + "WHERE " + + "pk_frame = ? " + "AND " + "str_state = 'RUNNING'"; + + @Override + public boolean updateFrameFixed(VirtualProc proc, FrameInterface frame) { + return getJdbcTemplate().update(UPDATE_FRAME_FIXED, FrameState.RUNNING.toString(), + proc.hostName, proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved, frame.getFrameId()) == 1; + } - getJdbcTemplate().update( - "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", newDispatchOrder, - layer.getLayerId(), CueUtil.buildFrameName(layer, frame)); + @Override + public DispatchFrame getDispatchFrame(String uuid) { + return getJdbcTemplate().queryForObject(GET_DISPATCH_FRAME, DISPATCH_FRAME_MAPPER, uuid); } - } - @Override - public boolean isFrameComplete(FrameInterface f) { + static final RowMapper DISPATCH_FRAME_MAPPER = new RowMapper() { + public DispatchFrame mapRow(ResultSet rs, int rowNum) throws SQLException { + DispatchFrame frame = new DispatchFrame(); + frame.id = rs.getString("pk_frame"); + frame.name = rs.getString("frame_name"); + frame.layerId = rs.getString("pk_layer"); + frame.jobId = rs.getString("pk_job"); + frame.showId = rs.getString("pk_show"); + frame.facilityId = rs.getString("pk_facility"); + frame.retries = rs.getInt("int_retries"); + frame.state = FrameState.valueOf(rs.getString("frame_state")); + frame.command = rs.getString("str_cmd"); + frame.jobName = rs.getString("job_name"); + frame.layerName = rs.getString("layer_name"); + frame.chunkSize = rs.getInt("int_chunk_size"); + frame.range = rs.getString("str_range"); + frame.logDir = rs.getString("str_log_dir"); + frame.shot = rs.getString("str_shot"); + frame.show = rs.getString("show_name"); + frame.owner = rs.getString("str_user"); + int uid = rs.getInt("int_uid"); + frame.uid = rs.wasNull() ? 
Optional.empty() : Optional.of(uid); + frame.state = FrameState.valueOf(rs.getString("frame_state")); + frame.minCores = rs.getInt("int_cores_min"); + frame.maxCores = rs.getInt("int_cores_max"); + frame.threadable = rs.getBoolean("b_threadable"); + frame.setMinMemory(rs.getLong("int_mem_min")); + frame.minGpus = rs.getInt("int_gpus_min"); + frame.maxGpus = rs.getInt("int_gpus_max"); + frame.minGpuMemory = rs.getLong("int_gpu_mem_min"); + frame.version = rs.getInt("int_version"); + frame.services = rs.getString("str_services"); + frame.os = rs.getString("str_os"); + return frame; + } + }; + + private static final String GET_DISPATCH_FRAME = "SELECT " + "show.str_name AS show_name, " + + "job.str_name AS job_name, " + "job.pk_job," + "job.pk_show," + "job.pk_facility," + + "job.str_name," + "job.str_shot," + "job.str_user," + "job.int_uid," + + "job.str_log_dir," + "COALESCE(str_os, '') AS str_os, " + + "frame.str_name AS frame_name, " + "frame.str_state AS frame_state, " + + "frame.pk_frame, " + "frame.pk_layer, " + "frame.int_retries, " + + "frame.int_version, " + "layer.str_name AS layer_name, " + + "layer.str_type AS layer_type, " + "layer.str_cmd, " + "layer.int_cores_min," + + "layer.int_cores_max," + "layer.b_threadable," + "layer.int_mem_min, " + + "layer.int_gpus_min," + "layer.int_gpus_max," + "layer.int_gpu_mem_min, " + + "layer.str_range, " + "layer.int_chunk_size, " + "layer.str_services " + "FROM " + + "layer, " + "job, " + "show, " + + "frame LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + "WHERE " + + "job.pk_show = show.pk_show " + "AND " + "frame.pk_job = job.pk_job " + "AND " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_frame = ?"; + + private static final String GET_FRAME_DETAIL = + "SELECT " + "frame.*, " + "job.pk_facility," + "job.pk_show " + "FROM " + "frame," + + "layer," + "job," + "show " + "WHERE " + "frame.pk_job = job.pk_job " + "AND " + + "frame.pk_layer = layer.pk_layer " + "AND " + "job.pk_show = show.pk_show "; + + private static final String GET_MINIMAL_FRAME = "SELECT " + "frame.pk_frame," + + "frame.str_name, " + "frame.pk_job, " + "frame.pk_layer, " + "frame.str_state, " + + "frame.int_version, " + "job.pk_show, " + "job.pk_facility " + "FROM " + "frame," + + "layer," + "job," + "show " + "WHERE " + "frame.pk_job = job.pk_job " + "AND " + + "frame.pk_layer = layer.pk_layer " + "AND " + "job.pk_show = show.pk_show "; + + private static final RowMapper FRAME_MAPPER = new RowMapper() { + public FrameEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameEntity frame = new FrameEntity(); + frame.id = rs.getString("pk_frame"); + frame.name = rs.getString("str_name"); + frame.jobId = rs.getString("pk_job"); + frame.layerId = rs.getString("pk_layer"); + frame.showId = rs.getString("pk_show"); + frame.facilityId = rs.getString("pk_facility"); + frame.version = rs.getInt("int_version"); + return frame; + } + }; + + private static final RowMapper FRAME_DETAIL_MAPPER = new RowMapper() { + public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameDetail frame = new FrameDetail(); + frame.id = rs.getString("pk_frame"); + frame.dependCount = rs.getInt("int_depend_count"); + frame.exitStatus = rs.getInt("int_exit_status"); + frame.jobId = rs.getString("pk_job"); + frame.layerId = rs.getString("pk_layer"); + frame.showId = rs.getString("pk_show"); + frame.maxRss = rs.getLong("int_mem_max_used"); + frame.name = rs.getString("str_name"); + frame.number = rs.getInt("int_number"); + frame.dispatchOrder = 
rs.getInt("int_dispatch_order"); + frame.retryCount = rs.getInt("int_retries"); + frame.dateStarted = rs.getTimestamp("ts_started"); + frame.dateStopped = rs.getTimestamp("ts_stopped"); + frame.dateUpdated = rs.getTimestamp("ts_updated"); + frame.dateLLU = rs.getTimestamp("ts_llu"); + frame.version = rs.getInt("int_version"); + + if (rs.getString("str_host") != null) { + frame.lastResource = String.format("%s/%d/%d", rs.getString("str_host"), + rs.getInt("int_cores"), rs.getInt("int_gpus")); + } else { + frame.lastResource = ""; + } + frame.state = FrameState.valueOf(rs.getString("str_state")); + + return frame; + } + }; + + public static final String FIND_ORPHANED_FRAMES = "SELECT " + "frame.pk_frame, " + + "frame.pk_layer, " + "frame.str_name, " + "frame.int_version, " + "job.pk_job, " + + "job.pk_show, " + "job.pk_facility " + "FROM " + "frame, " + "job " + "WHERE " + + "job.pk_job = frame.pk_job " + "AND " + "frame.str_state = 'RUNNING' " + "AND " + + "job.str_state = 'PENDING' " + "AND " + + "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + "AND " + + "current_timestamp - frame.ts_updated > interval '300' second"; + + @Override + public List getOrphanedFrames() { + return getJdbcTemplate().query(FIND_ORPHANED_FRAMES, FRAME_MAPPER); + } - String state = getJdbcTemplate().queryForObject("SELECT str_state FROM frame WHERE pk_frame=?", - String.class, f.getFrameId()); + private static final String IS_ORPHAN = "SELECT " + "COUNT(1) " + "FROM " + "frame " + "WHERE " + + "frame.pk_frame = ? " + "AND " + "frame.str_state = 'RUNNING' " + "AND " + + "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + "AND " + + "current_timestamp - frame.ts_updated > interval '300' second"; - if (state.equals(FrameState.SUCCEEDED.toString()) - || state.equals(FrameState.EATEN.toString())) { - return true; + @Override + public boolean isOrphan(FrameInterface frame) { + return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, frame.getFrameId()) == 1; } - return false; - } + private static final String INSERT_FRAME = "INSERT INTO " + "frame " + "(" + "pk_frame, " + + "pk_layer, " + "pk_job, " + "str_name, " + "str_state, " + "int_number, " + + "int_dispatch_order, " + "int_layer_order, " + "ts_updated, " + "ts_llu " + ") " + + "VALUES (?,?,?,?,?,?,?,?,current_timestamp,current_timestamp)"; - private static final RowMapper RESOURCE_USAGE_MAPPER = - new RowMapper() { - public ResourceUsage mapRow(ResultSet rs, int rowNum) throws SQLException { - return new ResourceUsage(rs.getLong("int_clock_time"), rs.getInt("int_cores"), - rs.getInt("int_gpus")); + @Override + public void insertFrames(LayerDetail layer, List frames) { + + int count = 0; + for (int frame : frames) { + getJdbcTemplate().update(INSERT_FRAME, SqlUtil.genKeyRandom(), layer.getLayerId(), + layer.getJobId(), CueUtil.buildFrameName(layer, frame), + FrameState.SETUP.toString(), frame, count, layer.dispatchOrder); + count++; } + } - }; + @Override + public List getDependentFrames(LightweightDependency depend) { + + /* + * Compound depends are handled in the DependManager. 
+ */ + + String key = null; + StringBuilder sb = new StringBuilder(4096); + sb.append(GET_MINIMAL_FRAME); + sb.append(" AND frame.int_depend_count > 0 "); + + if (EnumSet.of(DependType.JOB_ON_JOB, DependType.JOB_ON_LAYER, DependType.JOB_ON_FRAME) + .contains(depend.type)) { + sb.append("AND job.pk_job = ?"); + key = depend.dependErJobId; + } else if (EnumSet + .of(DependType.LAYER_ON_FRAME, DependType.LAYER_ON_LAYER, DependType.LAYER_ON_JOB) + .contains(depend.type)) { + sb.append("AND layer.pk_layer = ?"); + key = depend.dependErLayerId; + } else if (EnumSet + .of(DependType.FRAME_ON_JOB, DependType.FRAME_ON_LAYER, DependType.FRAME_ON_FRAME) + .contains(depend.type)) { + sb.append("AND frame.pk_frame = ?"); + key = depend.dependErFrameId; + } else { + return new ArrayList(1); + } - @Override - public ResourceUsage getResourceUsage(FrameInterface f) { - /* - * Using current_timestamp = ts_started here because ts_stopped is not set. Stopping the frame - * allows it to be dispatched again, which could blow away the ts_stopped time. - */ - return getJdbcTemplate().queryForObject( - "SELECT " + "COALESCE(interval_to_seconds(current_timestamp - ts_started), 1) " - + "AS int_clock_time, " + "COALESCE(int_cores, 100) AS int_cores," + "int_gpus " - + "FROM " + "frame " + "WHERE " + "pk_frame = ?", - RESOURCE_USAGE_MAPPER, f.getFrameId()); - } - - private static final String UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME = - "UPDATE " + "frame " + "SET " + "ts_updated = current_timestamp," + "int_mem_max_used = ?," - + "int_mem_used = ?," + "ts_llu = ? " + "WHERE " + "pk_frame = ? "; - - @Override - public void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, - long lluTime) { - getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME, maxRss, rss, - new Timestamp(lluTime * 1000l), f.getFrameId()); - } - - /** - * Attempt a SELECT FOR UPDATE NOWAIT on the frame record. If the frame is being modified by - * another transaction or if the version has been incremented a FrameReservationException is - * thrown. - * - * @param frame - * @param state - */ - @Override - public void lockFrameForUpdate(FrameInterface frame, FrameState state) { - try { - getJdbcTemplate().queryForObject( - "SELECT pk_frame FROM frame WHERE pk_frame=? AND " - + "str_state=? AND int_version =? FOR UPDATE NOWAIT", - String.class, frame.getFrameId(), state.toString(), frame.getVersion()); - } catch (Exception e) { - String error_msg = "the frame " + frame + " was updated by another thread."; - throw new FrameReservationException(error_msg, e); - } - } - - @Override - public boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state) { - - logger.info("Setting checkpoint state to: " + state.toString()); - - boolean result = false; - - if (state.equals(CheckpointState.COMPLETE)) { - /* - * Only update the checkpoint state to complete if the state is either Copying or Enabled. - */ - result = getJdbcTemplate().update( - "UPDATE frame SET str_checkpoint_state=?, " - + "int_checkpoint_count=int_checkpoint_count + 1 WHERE " - + "pk_frame=? AND str_checkpoint_state IN (?, ?)", - CheckpointState.COMPLETE.toString(), frame.getFrameId(), - CheckpointState.COPYING.toString(), CheckpointState.ENABLED.toString()) == 1; - } else { - result = getJdbcTemplate().update("UPDATE frame SET str_checkpoint_state=? 
WHERE pk_frame=?", - state.toString(), frame.getFrameId()) == 1; - } - - /* - * If the checkpoint state is complete or disabled then set the frame state back to waiting, if - * and only if the frame state is currently in the checkpoint state. + return getJdbcTemplate().query(sb.toString(), FRAME_MAPPER, new Object[] {key}); + } + + @Override + public FrameInterface findFrame(LayerInterface layer, int number) { + return getJdbcTemplate().queryForObject( + GET_MINIMAL_FRAME + " AND frame.pk_layer=? AND int_number=?", FRAME_MAPPER, + layer.getLayerId(), number); + } + + @Override + public FrameDetail getFrameDetail(FrameInterface frame) { + return getJdbcTemplate().queryForObject(GET_FRAME_DETAIL + " AND pk_frame=?", + FRAME_DETAIL_MAPPER, frame.getFrameId()); + } + + @Override + public FrameDetail getFrameDetail(String id) { + return getJdbcTemplate().queryForObject(GET_FRAME_DETAIL + " AND pk_frame=?", + FRAME_DETAIL_MAPPER, id); + } + + @Override + public FrameDetail findFrameDetail(JobInterface job, String name) { + // Uses C_FRAME_STR_NAME_UNQ + return getJdbcTemplate().queryForObject( + GET_FRAME_DETAIL + " AND frame.str_name=? AND frame.pk_job=?", FRAME_DETAIL_MAPPER, + name, job.getJobId()); + } + + @Override + public List findFrameDetails(FrameSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_FRAME_DETAIL), FRAME_DETAIL_MAPPER, + r.getValuesArray()); + } + + @Override + public List findFrames(FrameSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_MINIMAL_FRAME), FRAME_MAPPER, + r.getValuesArray()); + } + + private static final String FIND_LONGEST_FRAME = "SELECT " + "pk_frame " + "FROM " + "frame, " + + "layer " + "WHERE " + "frame.pk_layer = layer.pk_layer " + "AND " + + "frame.pk_job = ? " + "AND " + "str_state=? " + "AND " + "layer.str_type=? " + + "ORDER BY " + "ts_stopped - ts_started DESC " + "LIMIT 1"; + + @Override + public FrameDetail findLongestFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_LONGEST_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); + return getFrameDetail(pk_frame); + } + + private static final String FIND_SHORTEST_FRAME = "SELECT " + "pk_frame " + "FROM " + "frame, " + + "layer " + "WHERE " + "frame.pk_layer = layer.pk_layer " + "AND " + + "frame.pk_job = ? " + "AND " + "frame.str_state = ? " + "AND " + "layer.str_type = ? " + + "ORDER BY " + "ts_stopped - ts_started ASC " + "LIMIT 1"; + + @Override + public FrameDetail findShortestFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_SHORTEST_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); + return getFrameDetail(pk_frame); + } + + @Override + public FrameInterface getFrame(String id) { + return getJdbcTemplate().queryForObject(GET_MINIMAL_FRAME + " AND frame.pk_frame=?", + FRAME_MAPPER, id); + } + + @Override + public FrameInterface findFrame(JobInterface job, String name) { + // Uses C_FRAME_STR_NAME_UNQ + return getJdbcTemplate().queryForObject( + GET_MINIMAL_FRAME + " AND frame.str_name=? 
AND frame.pk_job=?", FRAME_MAPPER, name, + job.getJobId()); + } + + @Override + public void checkRetries(FrameInterface frame) { + int max_retries = getJdbcTemplate().queryForObject( + "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, frame.getJobId()); + + if (getJdbcTemplate().queryForObject("SELECT int_retries FROM frame WHERE pk_frame=?", + Integer.class, frame.getFrameId()) >= max_retries) { + getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_frame=?", + FrameState.DEAD.toString(), frame.getFrameId()); + } + } + + private static final String UPDATE_FRAME_STATE = + "UPDATE " + "frame " + "SET " + "str_state = ?, " + "ts_updated = current_timestamp, " + + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " + "AND " + + "int_version = ? "; + + @Override + public boolean updateFrameState(FrameInterface frame, FrameState state) { + if (getJdbcTemplate().update(UPDATE_FRAME_STATE, state.toString(), frame.getFrameId(), + frame.getVersion()) == 1) { + logger.info("The frame " + frame + " state changed to " + state.toString()); + return true; + } + logger.info("Failed to change the frame " + frame + " state to " + state.toString()); + return false; + } + + private static final String MARK_AS_WAITING = "UPDATE " + "frame " + "SET " + "str_state=?, " + + "ts_updated = current_timestamp, " + "ts_llu = current_timestamp, " + + "int_depend_count = 0, " + "int_version = int_version + 1 " + "WHERE " + + "pk_frame = ? " + "AND " + "int_version = ? " + "AND " + "str_state = ? "; + + @Override + public void markFrameAsWaiting(FrameInterface frame) { + getJdbcTemplate().update(MARK_AS_WAITING, FrameState.WAITING.toString(), frame.getFrameId(), + frame.getVersion(), FrameState.DEPEND.toString()); + } + + private static final String MARK_AS_DEPEND = "UPDATE " + "frame " + "SET " + "str_state=?, " + + "int_depend_count = ?, " + "ts_updated = current_timestamp, " + + "int_version = int_version + 1 " + "WHERE " + "pk_frame = ? " + "AND " + + "int_version = ? " + "AND " + "str_state = ? "; + + private static final String GET_FRAME_DEPEND_COUNT = + "SELECT " + "COUNT(1) " + "FROM " + "depend " + "WHERE " + "( " + + "(pk_job_depend_er = ? AND str_type LIKE 'JOB#_ON%' ESCAPE '#') " + "OR " + + "pk_layer_depend_er = ? " + "OR " + "pk_frame_depend_er = ? " + ") " + "AND " + + "depend.b_active = true " + "AND " + "depend.b_composite = false "; + + public void markFrameAsDepend(FrameInterface frame) { + // We need to full depend count in this case to reset the + // frames's depend count accurately. + int depend_count = getJdbcTemplate().queryForObject(GET_FRAME_DEPEND_COUNT, Integer.class, + frame.getJobId(), frame.getLayerId(), frame.getFrameId()); + + if (depend_count > 0) { + getJdbcTemplate().update(MARK_AS_DEPEND, FrameState.DEPEND.toString(), depend_count, + frame.getFrameId(), frame.getVersion(), FrameState.WAITING.toString()); + } + } + + private static final String FIND_HIGHEST_MEM_FRAME = + "SELECT " + "pk_frame " + "FROM " + "frame " + "WHERE " + "pk_job = ? " + "AND " + + "str_state = ? " + "ORDER BY " + "int_mem_max_used DESC " + "LIMIT 1"; + + @Override + public FrameDetail findHighestMemoryFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_HIGHEST_MEM_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString()); + return getFrameDetail(pk_frame); + } + + private static final String FIND_LOWEST_MEM_FRAME = + "SELECT " + "pk_frame " + "FROM " + "frame " + "WHERE " + "pk_job = ? " + "AND " + + "str_state = ? 
" + "ORDER BY " + "int_mem_max_used ASC " + "LIMIT 1"; + + @Override + public FrameDetail findLowestMemoryFrame(JobInterface job) { + String pk_frame = getJdbcTemplate().queryForObject(FIND_LOWEST_MEM_FRAME, String.class, + job.getJobId(), FrameState.SUCCEEDED.toString()); + return getFrameDetail(pk_frame); + } + + @Override + public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet) { + int start; + int size = frameSet.size(); + int min = getJdbcTemplate().queryForObject( + "SELECT MIN(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, + layer.getLayerId()); + + start = min - size; + for (int frameIdx = 0; frameIdx < size; frameIdx++) { + getJdbcTemplate().update( + "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", start, + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)), layer.getJobId()); + + logger.info("reordering " + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)) + + " to " + start); + start++; + } + } + + @Override + public void reorderFramesLast(LayerInterface layer, FrameSet frameSet) { + int start; + int size = frameSet.size(); + List frames = new ArrayList<>(size); + int max = getJdbcTemplate().queryForObject( + "SELECT MAX(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, + layer.getLayerId()); + + start = max + 1; + for (int i = 0; i <= size; i++) { + frames.add( + new Object[] {start + i, CueUtil.buildFrameName(layer, i), layer.getJobId()}); + } + + if (frames.size() > 0) { + getJdbcTemplate().batchUpdate( + "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", frames); + } + } + + @Override + public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet) { + + int size = frameSet.size(); + List frames = new ArrayList<>(size); + + for (int i = 0; i < size; i++) { + if (i >= size - i - 1) { + break; + } + try { + int a = getJdbcTemplate().queryForObject( + "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", + Integer.class, CueUtil.buildFrameName(layer, frameSet.get(i)), + layer.getJobId(), layer.getLayerId()); + + int b = getJdbcTemplate().queryForObject( + "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", + Integer.class, CueUtil.buildFrameName(layer, frameSet.get(size - i - 1)), + layer.getJobId(), layer.getLayerId()); + + frames.add(new Object[] {a, layer.getLayerId(), + CueUtil.buildFrameName(layer, frameSet.get(size - i - 1))}); + frames.add(new Object[] {b, layer.getLayerId(), + CueUtil.buildFrameName(layer, frameSet.get(i))}); + + } catch (Exception e) { + logger.info("frame not found while attempting to reverse layer, skipping"); + } + } + + if (frames.size() > 0) { + getJdbcTemplate().batchUpdate( + "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", + frames); + } + } + + @Override + public void staggerLayer(LayerInterface layer, String frameRange, int stagger) { + + /* + * If the layer is only 1 frame we don't stagger it. 
+ */ + if (getJdbcTemplate().queryForObject( + "SELECT int_total_count FROM layer_stat WHERE pk_layer=?", Integer.class, + layer.getLayerId()) == 1) { + return; + } + + logger.info("staggering: " + layer.getName() + " range: " + frameRange + " on " + stagger); + + FrameSet frameSet = null; + FrameSet range = null; + + try { + frameSet = new FrameSet(frameRange + ":" + stagger); + range = new FrameSet(frameRange); + } catch (Exception e) { + logger.warn("failed to stagger layer: " + layer.getName() + ", " + e); + return; + } + + /* + * Find the dispatch order of the first frame we're working with and base our other staggers + * off this value. + */ + int first = getJdbcTemplate().queryForObject( + "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", + Integer.class, CueUtil.buildFrameName(layer, range.get(0)), layer.getJobId(), + layer.getLayerId()); + + int size = range.size(); + for (int i = 0; i < size; i++) { + int frame = range.get(i); + int newDispatchOrder = frameSet.index(frame) + first; + + getJdbcTemplate().update( + "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? and str_name=?", + newDispatchOrder, layer.getLayerId(), CueUtil.buildFrameName(layer, frame)); + } + } + + @Override + public boolean isFrameComplete(FrameInterface f) { + + String state = getJdbcTemplate().queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, f.getFrameId()); + + if (state.equals(FrameState.SUCCEEDED.toString()) + || state.equals(FrameState.EATEN.toString())) { + return true; + } + + return false; + } + + private static final RowMapper RESOURCE_USAGE_MAPPER = + new RowMapper() { + public ResourceUsage mapRow(ResultSet rs, int rowNum) throws SQLException { + return new ResourceUsage(rs.getLong("int_clock_time"), rs.getInt("int_cores"), + rs.getInt("int_gpus")); + } + + }; + + @Override + public ResourceUsage getResourceUsage(FrameInterface f) { + /* + * Using current_timestamp = ts_started here because ts_stopped is not set. Stopping the + * frame allows it to be dispatched again, which could blow away the ts_stopped time. + */ + return getJdbcTemplate().queryForObject( + "SELECT " + "COALESCE(interval_to_seconds(current_timestamp - ts_started), 1) " + + "AS int_clock_time, " + "COALESCE(int_cores, 100) AS int_cores," + + "int_gpus " + "FROM " + "frame " + "WHERE " + "pk_frame = ?", + RESOURCE_USAGE_MAPPER, f.getFrameId()); + } + + private static final String UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME = "UPDATE " + "frame " + + "SET " + "ts_updated = current_timestamp," + "int_mem_max_used = ?," + + "int_mem_used = ?," + "ts_llu = ? " + "WHERE " + "pk_frame = ? "; + + @Override + public void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, + long lluTime) { + getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME, maxRss, rss, + new Timestamp(lluTime * 1000l), f.getFrameId()); + } + + /** + * Attempt a SELECT FOR UPDATE NOWAIT on the frame record. If the frame is being modified by + * another transaction or if the version has been incremented, a FrameReservationException is + * thrown. + * + * @param frame + * @param state */ - if ((state.equals(CheckpointState.DISABLED)) - || state.equals(CheckpointState.COMPLETE) && result) { - getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_frame=? 
AND str_state=?", - FrameState.WAITING.toString(), frame.getFrameId(), FrameState.CHECKPOINT.toString()); - } - - return result; - } - - @Override - public List getStaleCheckpoints(int cutoffTimeSec) { - return getJdbcTemplate().query( - GET_MINIMAL_FRAME + " AND job.str_state=? " + " AND frame.str_state=? " - + " AND current_timestamp - frame.ts_stopped > interval '" + cutoffTimeSec + "' second", - FRAME_MAPPER, JobState.PENDING.toString(), FrameState.CHECKPOINT.toString()); - } - - private static final String CREATE_FRAME_STATE_OVERRIDE = - "INSERT INTO frame_state_display_overrides (" + "pk_frame_override," + "pk_frame," - + "str_frame_state," + "str_override_text," + "str_rgb" + ") " + "VALUES (?,?,?,?,?)"; - - @Override - public void setFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override) { - getJdbcTemplate().update(CREATE_FRAME_STATE_OVERRIDE, SqlUtil.genKeyRandom(), frameId, - override.getState().toString(), override.getText(), - Integer.toString(override.getColor().getRed()) + "," - + Integer.toString(override.getColor().getGreen()) + "," - + Integer.toString(override.getColor().getBlue())); - } - - private static final String GET_FRAME_STATE_OVERRIDE = - "SELECT * from frame_state_display_overrides WHERE pk_frame = ?"; - - private static final RowMapper OVERRIDE_MAPPER = - new RowMapper() { - public FrameStateDisplayOverride mapRow(ResultSet rs, int rowNum) throws SQLException { - String[] rgb = rs.getString("str_rgb").split(","); - return FrameStateDisplayOverride.newBuilder() - .setState(FrameState.valueOf(rs.getString("str_frame_state"))) - .setText(rs.getString("str_override_text")) - .setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(Integer.parseInt(rgb[0])) - .setGreen(Integer.parseInt(rgb[1])).setBlue(Integer.parseInt(rgb[2])).build()) - .build(); + @Override + public void lockFrameForUpdate(FrameInterface frame, FrameState state) { + try { + getJdbcTemplate().queryForObject( + "SELECT pk_frame FROM frame WHERE pk_frame=? AND " + + "str_state=? AND int_version =? FOR UPDATE NOWAIT", + String.class, frame.getFrameId(), state.toString(), frame.getVersion()); + } catch (Exception e) { + String error_msg = "the frame " + frame + " was updated by another thread."; + throw new FrameReservationException(error_msg, e); } - }; - - @Override - public FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId) { - List overrides = - getJdbcTemplate().query(GET_FRAME_STATE_OVERRIDE, OVERRIDE_MAPPER, frameId); - return FrameStateDisplayOverrideSeq.newBuilder().addAllOverrides(overrides).build(); - } - - private static final String UPDATE_FRAME_STATE_OVERRIDE = - "UPDATE " + "frame_state_display_overrides " + "SET " + "str_override_text = ?," - + "str_rgb = ? " + "WHERE " + "pk_frame = ? 
" + "AND " + "str_frame_state = ?"; - - @Override - public void updateFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override) { - getJdbcTemplate().update(UPDATE_FRAME_STATE_OVERRIDE, override.getText(), - Integer.toString(override.getColor().getRed()) + "," - + Integer.toString(override.getColor().getGreen()) + "," - + Integer.toString(override.getColor().getBlue()), - frameId, override.getState().toString()); - } + } + + @Override + public boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state) { + + logger.info("Setting checkpoint state to: " + state.toString()); + + boolean result = false; + + if (state.equals(CheckpointState.COMPLETE)) { + /* + * Only update the checkpoint state to complete if the state is either Copying or + * Enabled. + */ + result = getJdbcTemplate().update( + "UPDATE frame SET str_checkpoint_state=?, " + + "int_checkpoint_count=int_checkpoint_count + 1 WHERE " + + "pk_frame=? AND str_checkpoint_state IN (?, ?)", + CheckpointState.COMPLETE.toString(), frame.getFrameId(), + CheckpointState.COPYING.toString(), CheckpointState.ENABLED.toString()) == 1; + } else { + result = getJdbcTemplate().update( + "UPDATE frame SET str_checkpoint_state=? WHERE pk_frame=?", state.toString(), + frame.getFrameId()) == 1; + } + + /* + * If the checkpoint state is complete or disabled then set the frame state back to waiting, + * if and only if the frame state is currently in the checkpoint state. + */ + if ((state.equals(CheckpointState.DISABLED)) + || state.equals(CheckpointState.COMPLETE) && result) { + getJdbcTemplate().update( + "UPDATE frame SET str_state=? WHERE pk_frame=? AND str_state=?", + FrameState.WAITING.toString(), frame.getFrameId(), + FrameState.CHECKPOINT.toString()); + } + + return result; + } + + @Override + public List getStaleCheckpoints(int cutoffTimeSec) { + return getJdbcTemplate().query( + GET_MINIMAL_FRAME + " AND job.str_state=? " + " AND frame.str_state=? 
" + + " AND current_timestamp - frame.ts_stopped > interval '" + cutoffTimeSec + + "' second", + FRAME_MAPPER, JobState.PENDING.toString(), FrameState.CHECKPOINT.toString()); + } + + private static final String CREATE_FRAME_STATE_OVERRIDE = + "INSERT INTO frame_state_display_overrides (" + "pk_frame_override," + "pk_frame," + + "str_frame_state," + "str_override_text," + "str_rgb" + ") " + + "VALUES (?,?,?,?,?)"; + + @Override + public void setFrameStateDisplayOverride(String frameId, FrameStateDisplayOverride override) { + getJdbcTemplate().update(CREATE_FRAME_STATE_OVERRIDE, SqlUtil.genKeyRandom(), frameId, + override.getState().toString(), override.getText(), + Integer.toString(override.getColor().getRed()) + "," + + Integer.toString(override.getColor().getGreen()) + "," + + Integer.toString(override.getColor().getBlue())); + } + + private static final String GET_FRAME_STATE_OVERRIDE = + "SELECT * from frame_state_display_overrides WHERE pk_frame = ?"; + + private static final RowMapper OVERRIDE_MAPPER = + new RowMapper() { + public FrameStateDisplayOverride mapRow(ResultSet rs, int rowNum) + throws SQLException { + String[] rgb = rs.getString("str_rgb").split(","); + return FrameStateDisplayOverride.newBuilder() + .setState(FrameState.valueOf(rs.getString("str_frame_state"))) + .setText(rs.getString("str_override_text")) + .setColor(FrameStateDisplayOverride.RGB.newBuilder() + .setRed(Integer.parseInt(rgb[0])) + .setGreen(Integer.parseInt(rgb[1])) + .setBlue(Integer.parseInt(rgb[2])).build()) + .build(); + } + }; + + @Override + public FrameStateDisplayOverrideSeq getFrameStateDisplayOverrides(String frameId) { + List overrides = + getJdbcTemplate().query(GET_FRAME_STATE_OVERRIDE, OVERRIDE_MAPPER, frameId); + return FrameStateDisplayOverrideSeq.newBuilder().addAllOverrides(overrides).build(); + } + + private static final String UPDATE_FRAME_STATE_OVERRIDE = + "UPDATE " + "frame_state_display_overrides " + "SET " + "str_override_text = ?," + + "str_rgb = ? " + "WHERE " + "pk_frame = ? " + "AND " + "str_frame_state = ?"; + + @Override + public void updateFrameStateDisplayOverride(String frameId, + FrameStateDisplayOverride override) { + getJdbcTemplate().update(UPDATE_FRAME_STATE_OVERRIDE, override.getText(), + Integer.toString(override.getColor().getRed()) + "," + + Integer.toString(override.getColor().getGreen()) + "," + + Integer.toString(override.getColor().getBlue()), + frameId, override.getState().toString()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java index 7b40cb7a7..499eaf7db 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java @@ -42,370 +42,379 @@ public class GroupDaoJdbc extends JdbcDaoSupport implements GroupDao { - private static final int MAX_NESTING_LEVEL = 10; + private static final int MAX_NESTING_LEVEL = 10; - @Override - public String getRootGroupId(ShowInterface show) { - return getJdbcTemplate().queryForObject( - "SELECT pk_folder FROM folder WHERE pk_show=? AND pk_parent_folder IS NULL", String.class, - show.getShowId()); - } + @Override + public String getRootGroupId(ShowInterface show) { + return getJdbcTemplate().queryForObject( + "SELECT pk_folder FROM folder WHERE pk_show=? 
AND pk_parent_folder IS NULL", + String.class, show.getShowId()); + } - @Override - public void deleteGroup(GroupInterface group) { + @Override + public void deleteGroup(GroupInterface group) { - if (childGroupCount(group) > 0) { - throw new EntityRemovalError( - "failed to delete group " + group.getName() + ", still has sub groups"); - } + if (childGroupCount(group) > 0) { + throw new EntityRemovalError( + "failed to delete group " + group.getName() + ", still has sub groups"); + } - if (childJobCount(group) > 0) { - throw new EntityRemovalError( - "failed to delete group " + group.getName() + ", still has sub jobs"); + if (childJobCount(group) > 0) { + throw new EntityRemovalError( + "failed to delete group " + group.getName() + ", still has sub jobs"); + } + + // reparent all jobs to root group + getJdbcTemplate().update("UPDATE job SET pk_folder=? WHERE pk_folder=?", + getRootGroupId(group), group.getId()); + + getJdbcTemplate().update( + "DELETE FROM folder WHERE pk_parent_folder IS NOT NULL AND pk_folder=?", + group.getId()); } - // reparent all jobs to root group - getJdbcTemplate().update("UPDATE job SET pk_folder=? WHERE pk_folder=?", getRootGroupId(group), - group.getId()); - - getJdbcTemplate().update( - "DELETE FROM folder WHERE pk_parent_folder IS NOT NULL AND pk_folder=?", group.getId()); - } - - public static final String INSERT_GROUP = "INSERT INTO " + "folder " + "( " + "pk_folder," - + "pk_parent_folder," + "pk_show, " + "pk_dept," + "str_name " + ") " + "VALUES (?,?,?,?,?)"; - - @Override - public void insertGroup(GroupDetail group) { - group.id = SqlUtil.genKeyRandom(); - String parentId = group.parentId; - try { - getJdbcTemplate().update(INSERT_GROUP, group.id, parentId, group.showId, group.deptId, - group.name); - } catch (Exception e) { - throw new EntityCreationError("error creating group, " + e); + public static final String INSERT_GROUP = + "INSERT INTO " + "folder " + "( " + "pk_folder," + "pk_parent_folder," + "pk_show, " + + "pk_dept," + "str_name " + ") " + "VALUES (?,?,?,?,?)"; + + @Override + public void insertGroup(GroupDetail group) { + group.id = SqlUtil.genKeyRandom(); + String parentId = group.parentId; + try { + getJdbcTemplate().update(INSERT_GROUP, group.id, parentId, group.showId, group.deptId, + group.name); + } catch (Exception e) { + throw new EntityCreationError("error creating group, " + e); + } } - } - @Override - public void insertGroup(GroupDetail group, GroupInterface parent) { - if (parent != null) { - group.parentId = parent.getGroupId(); + @Override + public void insertGroup(GroupDetail group, GroupInterface parent) { + if (parent != null) { + group.parentId = parent.getGroupId(); + } + insertGroup(group); } - insertGroup(group); - } - @Override - public void updateGroupParent(GroupInterface group, GroupInterface dest) { + @Override + public void updateGroupParent(GroupInterface group, GroupInterface dest) { + + if (group.getGroupId().equals(dest.getGroupId())) { + throw new EntityModificationError( + "error moving group, " + "cannot move group into itself"); + } + + if (!group.getShowId().equals(dest.getShowId())) { + throw new EntityModificationError( + "error moving group, " + "cannot move groups between shows"); + } + + int recurse = 0; + String destParent = dest.getGroupId(); + while (true) { + destParent = getJdbcTemplate().queryForObject( + "SELECT pk_parent_folder FROM folder WHERE pk_folder=?", String.class, + destParent); + if (destParent == null) { + break; + } + if (destParent.equals(group.getGroupId())) { + throw new 
EntityModificationError("error moving group, you cannot move a group " + + "into one of its sub groups"); + } + recurse++; + if (recurse > MAX_NESTING_LEVEL) { + throw new EntityModificationError("error moving group, cannot tell " + + "if your moving a group into one of its sub groups"); + } + } + + int result = getJdbcTemplate().update( + "UPDATE folder SET pk_parent_folder=? WHERE pk_folder=? AND pk_parent_folder IS NOT NULL", + dest.getId(), group.getId()); - if (group.getGroupId().equals(dest.getGroupId())) { - throw new EntityModificationError("error moving group, " + "cannot move group into itself"); + recurseParentChange(group.getId(), dest.getId()); + if (result == 0) { + throw new EntityModificationError("error moving group, " + group.getName() + + ", the group does not exist or its the top level group"); + } } - if (!group.getShowId().equals(dest.getShowId())) { - throw new EntityModificationError( - "error moving group, " + "cannot move groups between shows"); + @Override + public void updateName(GroupInterface group, String value) { + getJdbcTemplate().update("UPDATE folder SET str_name=? WHERE pk_folder=?", value, + group.getId()); } - int recurse = 0; - String destParent = dest.getGroupId(); - while (true) { - destParent = getJdbcTemplate().queryForObject( - "SELECT pk_parent_folder FROM folder WHERE pk_folder=?", String.class, destParent); - if (destParent == null) { - break; - } - if (destParent.equals(group.getGroupId())) { - throw new EntityModificationError( - "error moving group, you cannot move a group " + "into one of its sub groups"); - } - recurse++; - if (recurse > MAX_NESTING_LEVEL) { - throw new EntityModificationError("error moving group, cannot tell " - + "if your moving a group into one of its sub groups"); - } + @Override + public void updateDepartment(GroupInterface group, DepartmentInterface dept) { + getJdbcTemplate().update("UPDATE folder SET pk_dept=? WHERE pk_folder=?", + dept.getDepartmentId(), group.getId()); } - int result = getJdbcTemplate().update( - "UPDATE folder SET pk_parent_folder=? WHERE pk_folder=? AND pk_parent_folder IS NOT NULL", - dest.getId(), group.getId()); + @Override + public void updateDefaultJobMaxCores(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; + } + if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { + String msg = "The default max cores for a job must " + "be greater than a single core"; + throw new IllegalArgumentException(msg); + } + getJdbcTemplate().update("UPDATE folder SET int_job_max_cores=? WHERE pk_folder=?", value, + group.getId()); + } - recurseParentChange(group.getId(), dest.getId()); - if (result == 0) { - throw new EntityModificationError("error moving group, " + group.getName() - + ", the group does not exist or its the top level group"); + @Override + public void updateDefaultJobMinCores(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; + } + if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { + String msg = "The default min cores for a job must " + "be greater than a single core"; + throw new IllegalArgumentException(msg); + } + getJdbcTemplate().update("UPDATE folder SET int_job_min_cores=? WHERE pk_folder=?", value, + group.getId()); } - } - - @Override - public void updateName(GroupInterface group, String value) { - getJdbcTemplate().update("UPDATE folder SET str_name=? 
WHERE pk_folder=?", value, - group.getId()); - } - - @Override - public void updateDepartment(GroupInterface group, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE folder SET pk_dept=? WHERE pk_folder=?", - dept.getDepartmentId(), group.getId()); - } - - @Override - public void updateDefaultJobMaxCores(GroupInterface group, int value) { - if (value <= 0) { - value = CueUtil.FEATURE_DISABLED; + + @Override + public void updateMaxCores(GroupInterface group, int value) { + if (value < 0) { + value = CueUtil.FEATURE_DISABLED; + } + if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { + String msg = "The group max cores feature must " + + "be a whole core or greater, pass in: " + value; + throw new IllegalArgumentException(msg); + } + + getJdbcTemplate().update("UPDATE folder_resource SET int_max_cores=? WHERE pk_folder=?", + value, group.getId()); } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The default max cores for a job must " + "be greater than a single core"; - throw new IllegalArgumentException(msg); + + @Override + public void updateMinCores(GroupInterface group, int value) { + if (value < 0) { + value = 0; + } + getJdbcTemplate().update("UPDATE folder_resource SET int_min_cores=? WHERE pk_folder=?", + value, group.getId()); } - getJdbcTemplate().update("UPDATE folder SET int_job_max_cores=? WHERE pk_folder=?", value, - group.getId()); - } - - @Override - public void updateDefaultJobMinCores(GroupInterface group, int value) { - if (value <= 0) { - value = CueUtil.FEATURE_DISABLED; + + private static final String IS_OVER_MIN_CORES = "SELECT " + "COUNT(1) " + "FROM " + "job," + + "folder_resource fr " + "WHERE " + "job.pk_folder = fr.pk_folder " + "AND " + + "fr.int_cores > fr.int_min_cores " + "AND " + "job.pk_job = ?"; + + @Override + public boolean isOverMinCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, Integer.class, + job.getJobId()) > 0; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The default min cores for a job must " + "be greater than a single core"; - throw new IllegalArgumentException(msg); + + @Override + public void updateDefaultJobMaxGpus(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; + } + getJdbcTemplate().update("UPDATE folder SET int_job_max_gpus=? WHERE pk_folder=?", value, + group.getId()); } - getJdbcTemplate().update("UPDATE folder SET int_job_min_cores=? WHERE pk_folder=?", value, - group.getId()); - } - - @Override - public void updateMaxCores(GroupInterface group, int value) { - if (value < 0) { - value = CueUtil.FEATURE_DISABLED; + + @Override + public void updateDefaultJobMinGpus(GroupInterface group, int value) { + if (value <= 0) { + value = CueUtil.FEATURE_DISABLED; + } + getJdbcTemplate().update("UPDATE folder SET int_job_min_gpus=? WHERE pk_folder=?", value, + group.getId()); } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = - "The group max cores feature must " + "be a whole core or greater, pass in: " + value; - throw new IllegalArgumentException(msg); + + @Override + public void updateMaxGpus(GroupInterface group, int value) { + if (value < 0) { + value = CueUtil.FEATURE_DISABLED; + } + + getJdbcTemplate().update("UPDATE folder_resource SET int_max_gpus=? WHERE pk_folder=?", + value, group.getId()); } - getJdbcTemplate().update("UPDATE folder_resource SET int_max_cores=? 
WHERE pk_folder=?", value, - group.getId()); - } + @Override + public void updateMinGpus(GroupInterface group, int value) { + if (value < 0) { + value = 0; + } + getJdbcTemplate().update("UPDATE folder_resource SET int_min_gpus=? WHERE pk_folder=?", + value, group.getId()); + } - @Override - public void updateMinCores(GroupInterface group, int value) { - if (value < 0) { - value = 0; + @Override + public void updateDefaultJobPriority(GroupInterface group, int value) { + if (value < 0) { + value = CueUtil.FEATURE_DISABLED; + } + getJdbcTemplate().update("UPDATE folder SET int_job_priority=? WHERE pk_folder=?", value, + group.getId()); + if (value != CueUtil.FEATURE_DISABLED) { + getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE pk_job IN (" + + "SELECT pk_job from job WHERE pk_folder=?)", value, group.getId()); + } } - getJdbcTemplate().update("UPDATE folder_resource SET int_min_cores=? WHERE pk_folder=?", value, - group.getId()); - } - - private static final String IS_OVER_MIN_CORES = "SELECT " + "COUNT(1) " + "FROM " + "job," - + "folder_resource fr " + "WHERE " + "job.pk_folder = fr.pk_folder " + "AND " - + "fr.int_cores > fr.int_min_cores " + "AND " + "job.pk_job = ?"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, Integer.class, job.getJobId()) > 0; - } - - @Override - public void updateDefaultJobMaxGpus(GroupInterface group, int value) { - if (value <= 0) { - value = CueUtil.FEATURE_DISABLED; + + private static final String GET_GROUP_DETAIL = "SELECT " + "folder.pk_folder, " + + "folder.int_job_max_cores," + "folder.int_job_min_cores," + "folder.int_job_max_gpus," + + "folder.int_job_min_gpus," + "folder.int_job_priority," + "folder.str_name," + + "folder.pk_parent_folder," + "folder.pk_show," + "folder.pk_dept," + + "folder_level.int_level, " + "folder_resource.int_min_cores," + + "folder_resource.int_max_cores," + "folder_resource.int_min_gpus," + + "folder_resource.int_max_gpus " + "FROM " + "folder, " + "folder_level, " + + "folder_resource " + "WHERE " + "folder.pk_folder = folder_level.pk_folder " + "AND " + + "folder.pk_folder = folder_resource.pk_folder"; + + private static final String GET_GROUP_DETAIL_BY_JOB = "SELECT " + "folder.pk_folder, " + + "folder.int_job_max_cores," + "folder.int_job_min_cores," + "folder.int_job_max_gpus," + + "folder.int_job_min_gpus," + "folder.int_job_priority," + "folder.str_name," + + "folder.pk_parent_folder," + "folder.pk_show," + "folder.pk_dept," + + "folder_level.int_level, " + "folder_resource.int_min_cores," + + "folder_resource.int_max_cores," + "folder_resource.int_min_gpus," + + "folder_resource.int_max_gpus " + "FROM " + "folder, " + "folder_level, " + + "folder_resource, " + "job " + "WHERE " + "folder.pk_folder = folder_level.pk_folder " + + "AND " + "folder.pk_folder = folder_resource.pk_folder " + "AND " + + "job.pk_folder = folder.pk_folder " + "AND " + "job.pk_job = ?"; + + @Override + public GroupDetail getGroupDetail(String id) { + return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL + " AND folder.pk_folder=?", + GROUP_DETAIL_MAPPER, id); } - getJdbcTemplate().update("UPDATE folder SET int_job_max_gpus=? 
WHERE pk_folder=?", value, - group.getId()); - } - - @Override - public void updateDefaultJobMinGpus(GroupInterface group, int value) { - if (value <= 0) { - value = CueUtil.FEATURE_DISABLED; + + @Override + public GroupDetail getGroupDetail(JobInterface job) { + return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL_BY_JOB, GROUP_DETAIL_MAPPER, + job.getId()); } - getJdbcTemplate().update("UPDATE folder SET int_job_min_gpus=? WHERE pk_folder=?", value, - group.getId()); - } - - @Override - public void updateMaxGpus(GroupInterface group, int value) { - if (value < 0) { - value = CueUtil.FEATURE_DISABLED; + + @Override + public GroupDetail getRootGroupDetail(ShowInterface show) { + return getJdbcTemplate().queryForObject( + GET_GROUP_DETAIL + " AND folder.pk_show=? AND pk_parent_folder IS NULL", + GROUP_DETAIL_MAPPER, show.getShowId()); } - getJdbcTemplate().update("UPDATE folder_resource SET int_max_gpus=? WHERE pk_folder=?", value, - group.getId()); - } + @Override + public GroupInterface getGroup(String id) { + return getJdbcTemplate().queryForObject( + "SELECT pk_show, pk_folder,str_name FROM folder WHERE pk_folder=?", GROUP_MAPPER, + id); + } - @Override - public void updateMinGpus(GroupInterface group, int value) { - if (value < 0) { - value = 0; + @Override + public List getGroups(List idl) { + return getJdbcTemplate().query( + "SELECT pk_show, pk_folder, str_name FROM folder WHERE " + + SqlUtil.buildBindVariableArray("pk_folder", idl.size()), + GROUP_MAPPER, idl.toArray()); } - getJdbcTemplate().update("UPDATE folder_resource SET int_min_gpus=? WHERE pk_folder=?", value, - group.getId()); - } - - @Override - public void updateDefaultJobPriority(GroupInterface group, int value) { - if (value < 0) { - value = CueUtil.FEATURE_DISABLED; + + @Override + public List getChildrenRecursive(GroupInterface group) { + List groups = new ArrayList(32); + GroupInterface current = group; + for (GroupInterface g : getChildren(current)) { + current = g; + groups.add(current); + groups.addAll(getChildrenRecursive(current)); + } + return groups; } - getJdbcTemplate().update("UPDATE folder SET int_job_priority=? WHERE pk_folder=?", value, - group.getId()); - if (value != CueUtil.FEATURE_DISABLED) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? 
WHERE pk_job IN (" - + "SELECT pk_job from job WHERE pk_folder=?)", value, group.getId()); + + @Override + public List getChildren(GroupInterface group) { + return getJdbcTemplate().query( + "SELECT pk_show, pk_folder, str_name FROM folder WHERE pk_parent_folder = ?", + GROUP_MAPPER, group.getGroupId()); } - } - - private static final String GET_GROUP_DETAIL = - "SELECT " + "folder.pk_folder, " + "folder.int_job_max_cores," + "folder.int_job_min_cores," - + "folder.int_job_max_gpus," + "folder.int_job_min_gpus," + "folder.int_job_priority," - + "folder.str_name," + "folder.pk_parent_folder," + "folder.pk_show," + "folder.pk_dept," - + "folder_level.int_level, " + "folder_resource.int_min_cores," - + "folder_resource.int_max_cores," + "folder_resource.int_min_gpus," - + "folder_resource.int_max_gpus " + "FROM " + "folder, " + "folder_level, " - + "folder_resource " + "WHERE " + "folder.pk_folder = folder_level.pk_folder " + "AND " - + "folder.pk_folder = folder_resource.pk_folder"; - - private static final String GET_GROUP_DETAIL_BY_JOB = - "SELECT " + "folder.pk_folder, " + "folder.int_job_max_cores," + "folder.int_job_min_cores," - + "folder.int_job_max_gpus," + "folder.int_job_min_gpus," + "folder.int_job_priority," - + "folder.str_name," + "folder.pk_parent_folder," + "folder.pk_show," + "folder.pk_dept," - + "folder_level.int_level, " + "folder_resource.int_min_cores," - + "folder_resource.int_max_cores," + "folder_resource.int_min_gpus," - + "folder_resource.int_max_gpus " + "FROM " + "folder, " + "folder_level, " - + "folder_resource, " + "job " + "WHERE " + "folder.pk_folder = folder_level.pk_folder " - + "AND " + "folder.pk_folder = folder_resource.pk_folder " + "AND " - + "job.pk_folder = folder.pk_folder " + "AND " + "job.pk_job = ?"; - - @Override - public GroupDetail getGroupDetail(String id) { - return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL + " AND folder.pk_folder=?", - GROUP_DETAIL_MAPPER, id); - } - - @Override - public GroupDetail getGroupDetail(JobInterface job) { - return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL_BY_JOB, GROUP_DETAIL_MAPPER, - job.getId()); - } - - @Override - public GroupDetail getRootGroupDetail(ShowInterface show) { - return getJdbcTemplate().queryForObject( - GET_GROUP_DETAIL + " AND folder.pk_show=? 
AND pk_parent_folder IS NULL", - GROUP_DETAIL_MAPPER, show.getShowId()); - } - - @Override - public GroupInterface getGroup(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_show, pk_folder,str_name FROM folder WHERE pk_folder=?", GROUP_MAPPER, id); - } - - @Override - public List getGroups(List idl) { - return getJdbcTemplate().query("SELECT pk_show, pk_folder, str_name FROM folder WHERE " - + SqlUtil.buildBindVariableArray("pk_folder", idl.size()), GROUP_MAPPER, idl.toArray()); - } - - @Override - public List getChildrenRecursive(GroupInterface group) { - List groups = new ArrayList(32); - GroupInterface current = group; - for (GroupInterface g : getChildren(current)) { - current = g; - groups.add(current); - groups.addAll(getChildrenRecursive(current)); + + private static final String IS_MANAGED = "SELECT " + "COUNT(1) " + "FROM " + "folder, " + + "point " + "WHERE " + "folder.pk_show = point.pk_show " + "AND " + + "folder.pk_dept = point.pk_dept " + "AND " + "folder.b_exclude_managed = false " + + "AND " + "point.b_managed = true " + "AND " + "folder.pk_folder = ?"; + + @Override + public boolean isManaged(GroupInterface group) { + return getJdbcTemplate().queryForObject(IS_MANAGED, Integer.class, group.getGroupId()) > 0; } - return groups; - } - - @Override - public List getChildren(GroupInterface group) { - return getJdbcTemplate().query( - "SELECT pk_show, pk_folder, str_name FROM folder WHERE pk_parent_folder = ?", GROUP_MAPPER, - group.getGroupId()); - } - - private static final String IS_MANAGED = "SELECT " + "COUNT(1) " + "FROM " + "folder, " + "point " - + "WHERE " + "folder.pk_show = point.pk_show " + "AND " + "folder.pk_dept = point.pk_dept " - + "AND " + "folder.b_exclude_managed = false " + "AND " + "point.b_managed = true " + "AND " - + "folder.pk_folder = ?"; - - @Override - public boolean isManaged(GroupInterface group) { - return getJdbcTemplate().queryForObject(IS_MANAGED, Integer.class, group.getGroupId()) > 0; - } - - public static final RowMapper GROUP_MAPPER = new RowMapper() { - public GroupInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new GroupInterface() { - String id = rs.getString("pk_folder"); - String show = rs.getString("pk_show"); - String name = rs.getString("str_name"); - - public String getGroupId() { - return id; - } - public String getShowId() { - return show; + public static final RowMapper GROUP_MAPPER = new RowMapper() { + public GroupInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { + return new GroupInterface() { + String id = rs.getString("pk_folder"); + String show = rs.getString("pk_show"); + String name = rs.getString("str_name"); + + public String getGroupId() { + return id; + } + + public String getShowId() { + return show; + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + }; } - - public String getId() { - return id; + }; + + public static final RowMapper GROUP_DETAIL_MAPPER = new RowMapper() { + public GroupDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + GroupDetail group = new GroupDetail(); + group.id = rs.getString("pk_folder"); + group.jobMaxCores = rs.getInt("int_job_max_cores"); + group.jobMinCores = rs.getInt("int_job_min_cores"); + group.jobMaxGpus = rs.getInt("int_job_max_gpus"); + group.jobMinGpus = rs.getInt("int_job_min_gpus"); + group.jobPriority = rs.getInt("int_job_priority"); + group.minCores = rs.getInt("int_min_cores"); + group.maxCores = rs.getInt("int_max_cores"); + group.minGpus = 
rs.getInt("int_min_gpus"); + group.maxGpus = rs.getInt("int_max_gpus"); + group.name = rs.getString("str_name"); + group.parentId = rs.getString("pk_parent_folder"); + group.showId = rs.getString("pk_show"); + group.deptId = rs.getString("pk_dept"); + return group; } + }; - public String getName() { - return name; - } - }; + private int childGroupCount(GroupInterface group) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(*) FROM folder WHERE pk_parent_folder=?", Integer.class, + group.getId()); } - }; - - public static final RowMapper GROUP_DETAIL_MAPPER = new RowMapper() { - public GroupDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - GroupDetail group = new GroupDetail(); - group.id = rs.getString("pk_folder"); - group.jobMaxCores = rs.getInt("int_job_max_cores"); - group.jobMinCores = rs.getInt("int_job_min_cores"); - group.jobMaxGpus = rs.getInt("int_job_max_gpus"); - group.jobMinGpus = rs.getInt("int_job_min_gpus"); - group.jobPriority = rs.getInt("int_job_priority"); - group.minCores = rs.getInt("int_min_cores"); - group.maxCores = rs.getInt("int_max_cores"); - group.minGpus = rs.getInt("int_min_gpus"); - group.maxGpus = rs.getInt("int_max_gpus"); - group.name = rs.getString("str_name"); - group.parentId = rs.getString("pk_parent_folder"); - group.showId = rs.getString("pk_show"); - group.deptId = rs.getString("pk_dept"); - return group; + + private int childJobCount(GroupInterface group) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(*) FROM job WHERE pk_folder=? AND str_state=?", Integer.class, + group.getId(), JobState.PENDING.toString()); + } + + private void recurseParentChange(final String folderId, final String newParentId) { + getJdbcTemplate().call(new CallableStatementCreator() { + + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call recurse_folder_parent_change(?,?) }"); + c.setString(1, folderId); + c.setString(2, newParentId); + return c; + } + }, new ArrayList()); } - }; - - private int childGroupCount(GroupInterface group) { - return getJdbcTemplate().queryForObject("SELECT COUNT(*) FROM folder WHERE pk_parent_folder=?", - Integer.class, group.getId()); - } - - private int childJobCount(GroupInterface group) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(*) FROM job WHERE pk_folder=? AND str_state=?", Integer.class, group.getId(), - JobState.PENDING.toString()); - } - - private void recurseParentChange(final String folderId, final String newParentId) { - getJdbcTemplate().call(new CallableStatementCreator() { - - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recurse_folder_parent_change(?,?) }"); - c.setString(1, folderId); - c.setString(2, newParentId); - return c; - } - }, new ArrayList()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java index 13318a499..88cd40797 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HistoricalDaoJdbc.java @@ -25,19 +25,19 @@ public class HistoricalDaoJdbc extends JdbcDaoSupport implements HistoricalDao { - private static final String GET_FINISHED_JOBS = JobDaoJdbc.GET_JOB + "WHERE " - + "job.str_state = ? 
" + "AND " + "current_timestamp - job.ts_stopped > "; - - public List getFinishedJobs(int cutoffHours) { - String interval = "interval '" + cutoffHours + "' hour"; - return getJdbcTemplate().query(GET_FINISHED_JOBS + interval, JobDaoJdbc.JOB_MAPPER, - JobState.FINISHED.toString()); - } - - public void transferJob(JobInterface job) { - /** - * All of the historical transfer happens inside of triggers - */ - getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", job.getJobId()); - } + private static final String GET_FINISHED_JOBS = JobDaoJdbc.GET_JOB + "WHERE " + + "job.str_state = ? " + "AND " + "current_timestamp - job.ts_stopped > "; + + public List getFinishedJobs(int cutoffHours) { + String interval = "interval '" + cutoffHours + "' hour"; + return getJdbcTemplate().query(GET_FINISHED_JOBS + interval, JobDaoJdbc.JOB_MAPPER, + JobState.FINISHED.toString()); + } + + public void transferJob(JobInterface job) { + /** + * All of the historical transfer happens inside of triggers + */ + getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", job.getJobId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java index 77cc3b2d9..adf775a2a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java @@ -55,518 +55,530 @@ public class HostDaoJdbc extends JdbcDaoSupport implements HostDao { - @Autowired - private Environment env; - - public static final RowMapper HOST_DETAIL_MAPPER = new RowMapper() { - public HostEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - HostEntity host = new HostEntity(); - host.facilityId = rs.getString("pk_facility"); - host.allocId = rs.getString("pk_alloc"); - host.id = rs.getString("pk_host"); - host.lockState = LockState.valueOf(rs.getString("str_lock_state")); - host.name = rs.getString("str_name"); - host.nimbyEnabled = rs.getBoolean("b_nimby"); - host.state = HardwareState.valueOf(rs.getString("str_state")); - host.unlockAtBoot = rs.getBoolean("b_unlock_boot"); - host.cores = rs.getInt("int_cores"); - host.idleCores = rs.getInt("int_cores_idle"); - host.memory = rs.getLong("int_mem"); - host.idleMemory = rs.getLong("int_mem_idle"); - host.gpus = rs.getInt("int_gpus"); - host.idleGpus = rs.getInt("int_gpus_idle"); - host.gpuMemory = rs.getLong("int_gpu_mem"); - host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); - host.dateBooted = rs.getDate("ts_booted"); - host.dateCreated = rs.getDate("ts_created"); - host.datePinged = rs.getDate("ts_ping"); - return host; - } - }; - - public static final RowMapper HOST_MAPPER = new RowMapper() { - public HostInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new HostInterface() { - final String id = rs.getString("pk_host"); - final String allocid = rs.getString("pk_alloc"); - final String name = rs.getString("str_name"); - final String facility = rs.getString("pk_facility"); - - public String getHostId() { - return id; + @Autowired + private Environment env; + + public static final RowMapper HOST_DETAIL_MAPPER = new RowMapper() { + public HostEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + HostEntity host = new HostEntity(); + host.facilityId = rs.getString("pk_facility"); + host.allocId = rs.getString("pk_alloc"); + host.id = rs.getString("pk_host"); + host.lockState = LockState.valueOf(rs.getString("str_lock_state")); + host.name = 
rs.getString("str_name"); + host.nimbyEnabled = rs.getBoolean("b_nimby"); + host.state = HardwareState.valueOf(rs.getString("str_state")); + host.unlockAtBoot = rs.getBoolean("b_unlock_boot"); + host.cores = rs.getInt("int_cores"); + host.idleCores = rs.getInt("int_cores_idle"); + host.memory = rs.getLong("int_mem"); + host.idleMemory = rs.getLong("int_mem_idle"); + host.gpus = rs.getInt("int_gpus"); + host.idleGpus = rs.getInt("int_gpus_idle"); + host.gpuMemory = rs.getLong("int_gpu_mem"); + host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); + host.dateBooted = rs.getDate("ts_booted"); + host.dateCreated = rs.getDate("ts_created"); + host.datePinged = rs.getDate("ts_ping"); + return host; } + }; + + public static final RowMapper HOST_MAPPER = new RowMapper() { + public HostInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { + return new HostInterface() { + final String id = rs.getString("pk_host"); + final String allocid = rs.getString("pk_alloc"); + final String name = rs.getString("str_name"); + final String facility = rs.getString("pk_facility"); + + public String getHostId() { + return id; + } + + public String getAllocationId() { + return allocid; + } + + public String getId() { + return id; + } + + public String getName() { + return name; + } + + public String getFacilityId() { + return facility; + }; + }; + } + }; + + private static final String GET_HOST_DETAIL = "SELECT " + "host.pk_host, " + "host.pk_alloc," + + "host.str_lock_state," + "host.b_nimby," + "host.b_unlock_boot," + "host.int_cores," + + "host.int_cores_idle," + "host.int_mem," + "host.int_mem_idle," + "host.int_gpus," + + "host.int_gpus_idle," + "host.int_gpu_mem," + "host.int_gpu_mem_idle," + + "host.ts_created," + "host.str_name, " + "host_stat.str_state," + "host_stat.ts_ping," + + "host_stat.ts_booted, " + "alloc.pk_facility " + "FROM " + "host, " + "alloc, " + + "host_stat " + "WHERE " + "host.pk_host = host_stat.pk_host " + "AND " + + "host.pk_alloc = alloc.pk_alloc "; + + @Override + public void lockForUpdate(HostInterface host) { + try { + getJdbcTemplate().queryForObject( + "SELECT pk_host FROM host WHERE pk_host=? 
" + "FOR UPDATE NOWAIT", String.class, + host.getHostId()); + } catch (Exception e) { + throw new ResourceReservationFailureException("unable to lock host " + host.getName() + + ", the host was locked by another thread.", e); + } + } + + @Override + public HostEntity getHostDetail(HostInterface host) { + return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", + HOST_DETAIL_MAPPER, host.getHostId()); + } + + @Override + public HostEntity getHostDetail(String id) { + return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", + HOST_DETAIL_MAPPER, id); + } + + @Override + public HostEntity findHostDetail(String name) { + return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.str_name=?", + HOST_DETAIL_MAPPER, name); + } + + private static final String GET_HOST = "SELECT " + "host.pk_host, " + "host.pk_alloc," + + "host.str_name, " + "alloc.pk_facility " + "FROM " + "host," + "alloc " + "WHERE " + + "host.pk_alloc = alloc.pk_alloc "; + + @Override + public HostInterface getHost(String id) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); + } + + @Override + public HostInterface getHost(LocalHostAssignment l) { + return getJdbcTemplate().queryForObject( + GET_HOST + " AND host.pk_host = (" + + "SELECT pk_host FROM host_local WHERE pk_host_local=?)", + HOST_MAPPER, l.getId()); + } + + @Override + public HostInterface findHost(String name) { + return getJdbcTemplate().queryForObject( + GET_HOST + " AND (host.str_name=? OR host.str_fqdn=?)", HOST_MAPPER, name, name); + } - public String getAllocationId() { - return allocid; + public static final RowMapper DISPATCH_HOST_MAPPER = + new RowMapper() { + public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { + DispatchHost host = new DispatchHost(); + host.id = rs.getString("pk_host"); + host.allocationId = rs.getString("pk_alloc"); + host.facilityId = rs.getString("pk_facility"); + host.name = rs.getString("str_name"); + host.lockState = LockState.valueOf(rs.getString("str_lock_state")); + host.memory = rs.getLong("int_mem"); + host.cores = rs.getInt("int_cores"); + host.gpus = rs.getInt("int_gpus"); + host.gpuMemory = rs.getLong("int_gpu_mem"); + host.idleMemory = rs.getLong("int_mem_idle"); + host.idleCores = rs.getInt("int_cores_idle"); + host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); + host.idleGpus = rs.getInt("int_gpus_idle"); + host.isNimby = rs.getBoolean("b_nimby"); + host.threadMode = rs.getInt("int_thread_mode"); + host.tags = rs.getString("str_tags"); + host.setOs(rs.getString("str_os")); + host.hardwareState = HardwareState.valueOf(rs.getString("str_state")); + return host; + } + }; + + public static final String GET_DISPATCH_HOST = "SELECT " + "host.pk_host," + "host.pk_alloc," + + "host.str_name," + "host.str_lock_state, " + "host.int_cores, " + + "host.int_cores_idle, " + "host.int_mem," + "host.int_mem_idle, " + "host.int_gpus, " + + "host.int_gpus_idle, " + "host.int_gpu_mem," + "host.int_gpu_mem_idle, " + + "host.b_nimby, " + "host.int_thread_mode, " + "host.str_tags, " + "host_stat.str_os, " + + "host_stat.str_state, " + "alloc.pk_facility " + "FROM " + "host " + + "INNER JOIN host_stat " + "ON (host.pk_host = host_stat.pk_host) " + + "INNER JOIN alloc " + "ON (host.pk_alloc = alloc.pk_alloc) "; + + @Override + public DispatchHost findDispatchHost(String name) { + try { + return getJdbcTemplate().queryForObject( + GET_DISPATCH_HOST + "WHERE (host.str_name=? 
OR host.str_fqdn=?)", + DISPATCH_HOST_MAPPER, name, name); + } catch (EmptyResultDataAccessException e) { + throw new EmptyResultDataAccessException("Failed to find host " + name, 1); } + } + + @Override + public DispatchHost getDispatchHost(String id) { + return getJdbcTemplate().queryForObject(GET_DISPATCH_HOST + "WHERE host.pk_host=?", + DISPATCH_HOST_MAPPER, id); + } + + private static final String[] INSERT_HOST_DETAIL = { + "INSERT INTO " + "host " + "(" + "pk_host, " + "pk_alloc, " + "str_name, " + "b_nimby, " + + "str_lock_state, " + "int_procs," + "int_cores, " + "int_cores_idle, " + + "int_mem," + "int_mem_idle," + "int_gpus, " + "int_gpus_idle, " + + "int_gpu_mem," + "int_gpu_mem_idle," + "str_fqdn, " + "int_thread_mode " + + ") " + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + + "INSERT INTO " + "host_stat " + "(" + "pk_host_stat," + "pk_host," + "int_mem_total, " + + "int_mem_free," + "int_gpu_mem_total, " + "int_gpu_mem_free," + + "int_swap_total, " + "int_swap_free," + "int_mcp_total, " + "int_mcp_free," + + "int_load, " + "ts_booted, " + "str_state, " + "str_os " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)" + + }; + + @Override + public void insertRenderHost(RenderHost host, AllocationInterface a, boolean useLongNames) { + + ThreadMode threadMode = ThreadMode.AUTO; + if (host.getNimbyEnabled()) { + threadMode = ThreadMode.ALL; + } + + long memUnits = convertMemoryUnits(host); + long memReserverMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + if (memUnits < memReserverMin) { + throw new EntityCreationError("could not create host " + host.getName() + ", " + + " must have at least " + memReserverMin + " free memory."); + } + + String fqdn; + String name = host.getName(); + try { + fqdn = InetAddress.getByName(host.getName()).getCanonicalHostName(); + // If the provided host name matches the pinged name, use the pinged name. + // Otherwise use the provided name. + // If the host lookup fails, use the provided name. + // In all cases attempt to strip off the domain when setting the name. + if (fqdn.equals(host.getName())) { + name = getHostNameFromFQDN(fqdn, useLongNames); + } else { + name = getHostNameFromFQDN(host.getName(), useLongNames); + fqdn = host.getName(); + } + } catch (UnknownHostException e) { + logger.info(e); + fqdn = host.getName(); + name = getHostNameFromFQDN(name, useLongNames); + } + + String hid = SqlUtil.genKeyRandom(); + int coreUnits = host.getNumProcs() * host.getCoresPerProc(); + String os = host.getAttributesMap().get("SP_OS"); + if (os == null) { + os = Dispatcher.OS_DEFAULT; + } + + getJdbcTemplate().update(INSERT_HOST_DETAIL[0], hid, a.getAllocationId(), name, + host.getNimbyEnabled(), LockState.OPEN.toString(), host.getNumProcs(), coreUnits, + coreUnits, memUnits, memUnits, host.getNumGpus(), host.getNumGpus(), + host.getTotalGpuMem(), host.getTotalGpuMem(), fqdn, threadMode.getNumber()); + + getJdbcTemplate().update(INSERT_HOST_DETAIL[1], hid, hid, host.getTotalMem(), + host.getFreeMem(), host.getTotalGpuMem(), host.getFreeGpuMem(), host.getTotalSwap(), + host.getFreeSwap(), host.getTotalMcp(), host.getFreeMcp(), host.getLoad(), + new Timestamp(host.getBootTime() * 1000l), host.getState().toString(), os); + } + + @Override + public void recalcuateTags(final String id) { + getJdbcTemplate().call(new CallableStatementCreator() { + public CallableStatement createCallableStatement(Connection con) throws SQLException { + CallableStatement c = con.prepareCall("{ call recalculate_tags(?) 
}"); + c.setString(1, id); + return c; + } + }, new ArrayList()); + } + + private static final String UPDATE_RENDER_HOST = + "UPDATE " + "host_stat " + "SET " + "int_mem_total = ?, " + "int_mem_free = ?, " + + "int_swap_total = ?, " + "int_swap_free = ?, " + "int_mcp_total = ?, " + + "int_mcp_free = ?, " + "int_gpu_mem_total = ?, " + "int_gpu_mem_free = ?, " + + "int_load = ?," + "ts_booted = ?, " + "ts_ping = current_timestamp, " + + "str_os = ? " + "WHERE " + "pk_host = ?"; + + @Override + public void updateHostStats(HostInterface host, long totalMemory, long freeMemory, + long totalSwap, long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, + long freeGpuMemory, int load, Timestamp bootTime, String os) { + + if (os == null) { + os = Dispatcher.OS_DEFAULT; + } + + getJdbcTemplate().update(UPDATE_RENDER_HOST, totalMemory, freeMemory, totalSwap, freeSwap, + totalMcp, freeMcp, totalGpuMemory, freeGpuMemory, load, bootTime, os, + host.getHostId()); + } + + @Override + public boolean hostExists(String hostname) { + try { + return getJdbcTemplate().queryForObject( + "SELECT 1 FROM host WHERE (str_fqdn=? OR str_name=?)", Integer.class, hostname, + hostname) > 0; + } catch (EmptyResultDataAccessException e) { + return false; + } + } + + @Override + public void updateHostResources(HostInterface host, HostReport report) { + + long memory = convertMemoryUnits(report.getHost()); + int cores = report.getHost().getNumProcs() * report.getHost().getCoresPerProc(); + long gpu_memory = report.getHost().getTotalGpuMem(); + int gpus = report.getHost().getNumGpus(); + + getJdbcTemplate().update( + "UPDATE " + "host " + "SET " + "b_nimby=?," + "int_cores=?," + "int_cores_idle=?," + + "int_mem=?," + "int_mem_idle=?, " + "int_gpus=?," + "int_gpus_idle=?," + + "int_gpu_mem=?," + "int_gpu_mem_idle=? " + "WHERE " + "pk_host=? " + + "AND " + "int_cores = int_cores_idle " + "AND " + + "int_mem = int_mem_idle " + "AND " + "int_gpus = int_gpus_idle", + report.getHost().getNimbyEnabled(), cores, cores, memory, memory, gpus, gpus, + gpu_memory, gpu_memory, host.getId()); + } + + @Override + public void updateHostLock(HostInterface host, LockState state, Source source) { + getJdbcTemplate().update( + "UPDATE host SET str_lock_state=?, str_lock_source=? WHERE pk_host=?", + state.toString(), source.toString(), host.getHostId()); + } + + @Override + public void updateHostRebootWhenIdle(HostInterface host, boolean enabled) { + getJdbcTemplate().update("UPDATE host SET b_reboot_idle=? 
WHERE pk_host=?", enabled, + host.getHostId()); + } - public String getId() { - return id; + @Override + public void deleteHost(HostInterface host) { + getJdbcTemplate().update("DELETE FROM comments WHERE pk_host=?", host.getHostId()); + getJdbcTemplate().update("DELETE FROM host WHERE pk_host=?", host.getHostId()); + } + + private static final String DELETE_DOWN_HOST_COMMENTS = + "DELETE " + "FROM " + "comments " + "USING " + "host_stat " + "WHERE " + + "comments.pk_host = host_stat.pk_host " + "AND " + "host_stat.str_state = ?"; + + private static final String DELETE_DOWN_HOSTS = + "DELETE " + "FROM " + "host " + "USING " + "host_stat " + "WHERE " + + "host.pk_host = host_stat.pk_host " + "AND " + "host_stat.str_state=?"; + + @Override + public void deleteDownHosts() { + getJdbcTemplate().update(DELETE_DOWN_HOST_COMMENTS, HardwareState.DOWN.toString()); + getJdbcTemplate().update(DELETE_DOWN_HOSTS, HardwareState.DOWN.toString()); + } + + @Override + public void updateHostState(HostInterface host, HardwareState state) { + getJdbcTemplate().update("UPDATE host_stat SET str_state=? WHERE pk_host=?", + state.toString(), host.getHostId()); + } + + @Override + public void updateHostFreeTempDir(HostInterface host, Long freeTempDir) { + getJdbcTemplate().update("UPDATE host_stat SET int_mcp_free=? WHERE pk_host=?", freeTempDir, + host.getHostId()); + } + + @Override + public void updateHostSetAllocation(HostInterface host, AllocationInterface alloc) { + + String tag = getJdbcTemplate().queryForObject("SELECT str_tag FROM alloc WHERE pk_alloc=?", + String.class, alloc.getAllocationId()); + getJdbcTemplate().update("UPDATE host SET pk_alloc=? WHERE pk_host=?", + alloc.getAllocationId(), host.getHostId()); + + removeTagsByType(host, HostTagType.ALLOC); + tagHost(host, tag, HostTagType.ALLOC); + } + + @Override + public boolean isHostLocked(HostInterface host) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host WHERE pk_host=? AND str_lock_state!=?", Integer.class, + host.getHostId(), LockState.OPEN.toString()) > 0; + } + + private static final String INSERT_TAG = "INSERT INTO " + "host_tag " + "(" + "pk_host_tag," + + "pk_host," + "str_tag," + "str_tag_type, " + "b_constant " + ") VALUES (?,?,?,?,?)"; + + @Override + public void tagHost(String id, String tag, HostTagType type) { + boolean constant = false; + if (type.equals(HostTagType.ALLOC)) + constant = true; + + getJdbcTemplate().update(INSERT_TAG, SqlUtil.genKeyRandom(), id, tag.trim(), + type.toString(), constant); + } + + @Override + public void tagHost(HostInterface host, String tag, HostTagType type) { + tagHost(host.getHostId(), tag, type); + } + + @Override + public void removeTagsByType(HostInterface host, HostTagType type) { + getJdbcTemplate().update("DELETE FROM host_tag WHERE pk_host=? AND str_tag_type=?", + host.getHostId(), type.toString()); + } + + @Override + public void removeTag(HostInterface host, String tag) { + getJdbcTemplate().update( + "DELETE FROM host_tag WHERE pk_host=? AND str_tag=? AND b_constant=false", + host.getHostId(), tag); + } + + @Override + public void renameTag(HostInterface host, String oldTag, String newTag) { + getJdbcTemplate().update( + "UPDATE host_tag SET str_tag=? WHERE pk_host=? AND str_tag=? AND b_constant=false", + newTag, host.getHostId(), oldTag); + } + + @Override + public void updateThreadMode(HostInterface host, ThreadMode mode) { + getJdbcTemplate().update("UPDATE host SET int_thread_mode=? 
WHERE pk_host=?", + mode.getNumber(), host.getHostId()); + } + + @Override + public void updateHostOs(HostInterface host, String os) { + getJdbcTemplate().update("UPDATE host_stat SET str_os=? WHERE pk_host=?", os, + host.getHostId()); + } + + @Override + public int getStrandedCoreUnits(HostInterface h) { + try { + int idle_cores = getJdbcTemplate().queryForObject( + "SELECT int_cores_idle FROM host WHERE pk_host = ? AND int_mem_idle <= ?", + Integer.class, h.getHostId(), Dispatcher.MEM_STRANDED_THRESHHOLD); + return (int) (Math.floor(idle_cores / 100.0)) * 100; + } catch (EmptyResultDataAccessException e) { + return 0; } + } - public String getName() { - return name; + @Override + public int getStrandedGpus(HostInterface h) { + try { + int idle_gpus = getJdbcTemplate().queryForObject( + "SELECT int_gpus_idle FROM host WHERE pk_host = ?", Integer.class, + h.getHostId()); + return idle_gpus; + } catch (EmptyResultDataAccessException e) { + return 0; } + } + + private static final String IS_HOST_UP = "SELECT " + "COUNT(1) " + "FROM " + "host_stat " + + "WHERE " + "host_stat.str_state = ? " + "AND " + "host_stat.pk_host = ? "; + + @Override + public boolean isHostUp(HostInterface host) { + return getJdbcTemplate().queryForObject(IS_HOST_UP, Integer.class, + HardwareState.UP.toString(), host.getHostId()) == 1; + } + + private static final String IS_PREFER_SHOW = "SELECT " + "COUNT(1) " + "FROM " + "host," + + "owner," + "deed " + "WHERE " + "host.pk_host = deed.pk_host " + "AND " + + "deed.pk_owner = owner.pk_owner " + "AND " + "host.pk_host = ?"; - public String getFacilityId() { - return facility; - }; - }; - } - }; - - private static final String GET_HOST_DETAIL = "SELECT " + "host.pk_host, " + "host.pk_alloc," - + "host.str_lock_state," + "host.b_nimby," + "host.b_unlock_boot," + "host.int_cores," - + "host.int_cores_idle," + "host.int_mem," + "host.int_mem_idle," + "host.int_gpus," - + "host.int_gpus_idle," + "host.int_gpu_mem," + "host.int_gpu_mem_idle," + "host.ts_created," - + "host.str_name, " + "host_stat.str_state," + "host_stat.ts_ping," + "host_stat.ts_booted, " - + "alloc.pk_facility " + "FROM " + "host, " + "alloc, " + "host_stat " + "WHERE " - + "host.pk_host = host_stat.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc "; - - @Override - public void lockForUpdate(HostInterface host) { - try { - getJdbcTemplate().queryForObject( - "SELECT pk_host FROM host WHERE pk_host=? 
" + "FOR UPDATE NOWAIT", String.class, - host.getHostId()); - } catch (Exception e) { - throw new ResourceReservationFailureException( - "unable to lock host " + host.getName() + ", the host was locked by another thread.", e); - } - } - - @Override - public HostEntity getHostDetail(HostInterface host) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", - HOST_DETAIL_MAPPER, host.getHostId()); - } - - @Override - public HostEntity getHostDetail(String id) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", - HOST_DETAIL_MAPPER, id); - } - - @Override - public HostEntity findHostDetail(String name) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.str_name=?", - HOST_DETAIL_MAPPER, name); - } - - private static final String GET_HOST = - "SELECT " + "host.pk_host, " + "host.pk_alloc," + "host.str_name, " + "alloc.pk_facility " - + "FROM " + "host," + "alloc " + "WHERE " + "host.pk_alloc = alloc.pk_alloc "; - - @Override - public HostInterface getHost(String id) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); - } - - @Override - public HostInterface getHost(LocalHostAssignment l) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host = (" - + "SELECT pk_host FROM host_local WHERE pk_host_local=?)", HOST_MAPPER, l.getId()); - } - - @Override - public HostInterface findHost(String name) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND (host.str_name=? OR host.str_fqdn=?)", - HOST_MAPPER, name, name); - } - - public static final RowMapper DISPATCH_HOST_MAPPER = new RowMapper() { - public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchHost host = new DispatchHost(); - host.id = rs.getString("pk_host"); - host.allocationId = rs.getString("pk_alloc"); - host.facilityId = rs.getString("pk_facility"); - host.name = rs.getString("str_name"); - host.lockState = LockState.valueOf(rs.getString("str_lock_state")); - host.memory = rs.getLong("int_mem"); - host.cores = rs.getInt("int_cores"); - host.gpus = rs.getInt("int_gpus"); - host.gpuMemory = rs.getLong("int_gpu_mem"); - host.idleMemory = rs.getLong("int_mem_idle"); - host.idleCores = rs.getInt("int_cores_idle"); - host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); - host.idleGpus = rs.getInt("int_gpus_idle"); - host.isNimby = rs.getBoolean("b_nimby"); - host.threadMode = rs.getInt("int_thread_mode"); - host.tags = rs.getString("str_tags"); - host.setOs(rs.getString("str_os")); - host.hardwareState = HardwareState.valueOf(rs.getString("str_state")); - return host; - } - }; - - public static final String GET_DISPATCH_HOST = "SELECT " + "host.pk_host," + "host.pk_alloc," - + "host.str_name," + "host.str_lock_state, " + "host.int_cores, " + "host.int_cores_idle, " - + "host.int_mem," + "host.int_mem_idle, " + "host.int_gpus, " + "host.int_gpus_idle, " - + "host.int_gpu_mem," + "host.int_gpu_mem_idle, " + "host.b_nimby, " - + "host.int_thread_mode, " + "host.str_tags, " + "host_stat.str_os, " - + "host_stat.str_state, " + "alloc.pk_facility " + "FROM " + "host " + "INNER JOIN host_stat " - + "ON (host.pk_host = host_stat.pk_host) " + "INNER JOIN alloc " - + "ON (host.pk_alloc = alloc.pk_alloc) "; - - @Override - public DispatchHost findDispatchHost(String name) { - try { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_HOST + "WHERE (host.str_name=? 
OR host.str_fqdn=?)", DISPATCH_HOST_MAPPER, - name, name); - } catch (EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException("Failed to find host " + name, 1); - } - } - - @Override - public DispatchHost getDispatchHost(String id) { - return getJdbcTemplate().queryForObject(GET_DISPATCH_HOST + "WHERE host.pk_host=?", - DISPATCH_HOST_MAPPER, id); - } - - private static final String[] INSERT_HOST_DETAIL = { - "INSERT INTO " + "host " + "(" + "pk_host, " + "pk_alloc, " + "str_name, " + "b_nimby, " - + "str_lock_state, " + "int_procs," + "int_cores, " + "int_cores_idle, " + "int_mem," - + "int_mem_idle," + "int_gpus, " + "int_gpus_idle, " + "int_gpu_mem," - + "int_gpu_mem_idle," + "str_fqdn, " + "int_thread_mode " + ") " - + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", - - "INSERT INTO " + "host_stat " + "(" + "pk_host_stat," + "pk_host," + "int_mem_total, " - + "int_mem_free," + "int_gpu_mem_total, " + "int_gpu_mem_free," + "int_swap_total, " - + "int_swap_free," + "int_mcp_total, " + "int_mcp_free," + "int_load, " + "ts_booted, " - + "str_state, " + "str_os " + ") " + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)" - - }; - - @Override - public void insertRenderHost(RenderHost host, AllocationInterface a, boolean useLongNames) { - - ThreadMode threadMode = ThreadMode.AUTO; - if (host.getNimbyEnabled()) { - threadMode = ThreadMode.ALL; - } - - long memUnits = convertMemoryUnits(host); - long memReserverMin = env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - if (memUnits < memReserverMin) { - throw new EntityCreationError("could not create host " + host.getName() + ", " - + " must have at least " + memReserverMin + " free memory."); - } - - String fqdn; - String name = host.getName(); - try { - fqdn = InetAddress.getByName(host.getName()).getCanonicalHostName(); - // If the provided host name matches the pinged name, use the pinged name. - // Otherwise use the provided name. - // If the host lookup fails, use the provided name. - // In all cases attempt to strip off the domain when setting the name. - if (fqdn.equals(host.getName())) { - name = getHostNameFromFQDN(fqdn, useLongNames); - } else { - name = getHostNameFromFQDN(host.getName(), useLongNames); - fqdn = host.getName(); - } - } catch (UnknownHostException e) { - logger.info(e); - fqdn = host.getName(); - name = getHostNameFromFQDN(name, useLongNames); - } - - String hid = SqlUtil.genKeyRandom(); - int coreUnits = host.getNumProcs() * host.getCoresPerProc(); - String os = host.getAttributesMap().get("SP_OS"); - if (os == null) { - os = Dispatcher.OS_DEFAULT; - } - - getJdbcTemplate().update(INSERT_HOST_DETAIL[0], hid, a.getAllocationId(), name, - host.getNimbyEnabled(), LockState.OPEN.toString(), host.getNumProcs(), coreUnits, coreUnits, - memUnits, memUnits, host.getNumGpus(), host.getNumGpus(), host.getTotalGpuMem(), - host.getTotalGpuMem(), fqdn, threadMode.getNumber()); - - getJdbcTemplate().update(INSERT_HOST_DETAIL[1], hid, hid, host.getTotalMem(), host.getFreeMem(), - host.getTotalGpuMem(), host.getFreeGpuMem(), host.getTotalSwap(), host.getFreeSwap(), - host.getTotalMcp(), host.getFreeMcp(), host.getLoad(), - new Timestamp(host.getBootTime() * 1000l), host.getState().toString(), os); - } - - @Override - public void recalcuateTags(final String id) { - getJdbcTemplate().call(new CallableStatementCreator() { - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recalculate_tags(?) 
}"); - c.setString(1, id); - return c; - } - }, new ArrayList()); - } - - private static final String UPDATE_RENDER_HOST = "UPDATE " + "host_stat " + "SET " - + "int_mem_total = ?, " + "int_mem_free = ?, " + "int_swap_total = ?, " - + "int_swap_free = ?, " + "int_mcp_total = ?, " + "int_mcp_free = ?, " - + "int_gpu_mem_total = ?, " + "int_gpu_mem_free = ?, " + "int_load = ?," + "ts_booted = ?, " - + "ts_ping = current_timestamp, " + "str_os = ? " + "WHERE " + "pk_host = ?"; - - @Override - public void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, - long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, int load, - Timestamp bootTime, String os) { - - if (os == null) { - os = Dispatcher.OS_DEFAULT; - } - - getJdbcTemplate().update(UPDATE_RENDER_HOST, totalMemory, freeMemory, totalSwap, freeSwap, - totalMcp, freeMcp, totalGpuMemory, freeGpuMemory, load, bootTime, os, host.getHostId()); - } - - @Override - public boolean hostExists(String hostname) { - try { - return getJdbcTemplate().queryForObject("SELECT 1 FROM host WHERE (str_fqdn=? OR str_name=?)", - Integer.class, hostname, hostname) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - @Override - public void updateHostResources(HostInterface host, HostReport report) { - - long memory = convertMemoryUnits(report.getHost()); - int cores = report.getHost().getNumProcs() * report.getHost().getCoresPerProc(); - long gpu_memory = report.getHost().getTotalGpuMem(); - int gpus = report.getHost().getNumGpus(); - - getJdbcTemplate().update( - "UPDATE " + "host " + "SET " + "b_nimby=?," + "int_cores=?," + "int_cores_idle=?," - + "int_mem=?," + "int_mem_idle=?, " + "int_gpus=?," + "int_gpus_idle=?," - + "int_gpu_mem=?," + "int_gpu_mem_idle=? " + "WHERE " + "pk_host=? " + "AND " - + "int_cores = int_cores_idle " + "AND " + "int_mem = int_mem_idle " + "AND " - + "int_gpus = int_gpus_idle", - report.getHost().getNimbyEnabled(), cores, cores, memory, memory, gpus, gpus, gpu_memory, - gpu_memory, host.getId()); - } - - @Override - public void updateHostLock(HostInterface host, LockState state, Source source) { - getJdbcTemplate().update("UPDATE host SET str_lock_state=?, str_lock_source=? WHERE pk_host=?", - state.toString(), source.toString(), host.getHostId()); - } - - @Override - public void updateHostRebootWhenIdle(HostInterface host, boolean enabled) { - getJdbcTemplate().update("UPDATE host SET b_reboot_idle=? WHERE pk_host=?", enabled, - host.getHostId()); - } - - @Override - public void deleteHost(HostInterface host) { - getJdbcTemplate().update("DELETE FROM comments WHERE pk_host=?", host.getHostId()); - getJdbcTemplate().update("DELETE FROM host WHERE pk_host=?", host.getHostId()); - } - - private static final String DELETE_DOWN_HOST_COMMENTS = - "DELETE " + "FROM " + "comments " + "USING " + "host_stat " + "WHERE " - + "comments.pk_host = host_stat.pk_host " + "AND " + "host_stat.str_state = ?"; - - private static final String DELETE_DOWN_HOSTS = - "DELETE " + "FROM " + "host " + "USING " + "host_stat " + "WHERE " - + "host.pk_host = host_stat.pk_host " + "AND " + "host_stat.str_state=?"; - - @Override - public void deleteDownHosts() { - getJdbcTemplate().update(DELETE_DOWN_HOST_COMMENTS, HardwareState.DOWN.toString()); - getJdbcTemplate().update(DELETE_DOWN_HOSTS, HardwareState.DOWN.toString()); - } - - @Override - public void updateHostState(HostInterface host, HardwareState state) { - getJdbcTemplate().update("UPDATE host_stat SET str_state=? 
WHERE pk_host=?", state.toString(), - host.getHostId()); - } - - @Override - public void updateHostFreeTempDir(HostInterface host, Long freeTempDir) { - getJdbcTemplate().update("UPDATE host_stat SET int_mcp_free=? WHERE pk_host=?", freeTempDir, - host.getHostId()); - } - - @Override - public void updateHostSetAllocation(HostInterface host, AllocationInterface alloc) { - - String tag = getJdbcTemplate().queryForObject("SELECT str_tag FROM alloc WHERE pk_alloc=?", - String.class, alloc.getAllocationId()); - getJdbcTemplate().update("UPDATE host SET pk_alloc=? WHERE pk_host=?", alloc.getAllocationId(), - host.getHostId()); - - removeTagsByType(host, HostTagType.ALLOC); - tagHost(host, tag, HostTagType.ALLOC); - } - - @Override - public boolean isHostLocked(HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE pk_host=? AND str_lock_state!=?", Integer.class, - host.getHostId(), LockState.OPEN.toString()) > 0; - } - - private static final String INSERT_TAG = "INSERT INTO " + "host_tag " + "(" + "pk_host_tag," - + "pk_host," + "str_tag," + "str_tag_type, " + "b_constant " + ") VALUES (?,?,?,?,?)"; - - @Override - public void tagHost(String id, String tag, HostTagType type) { - boolean constant = false; - if (type.equals(HostTagType.ALLOC)) - constant = true; - - getJdbcTemplate().update(INSERT_TAG, SqlUtil.genKeyRandom(), id, tag.trim(), type.toString(), - constant); - } - - @Override - public void tagHost(HostInterface host, String tag, HostTagType type) { - tagHost(host.getHostId(), tag, type); - } - - @Override - public void removeTagsByType(HostInterface host, HostTagType type) { - getJdbcTemplate().update("DELETE FROM host_tag WHERE pk_host=? AND str_tag_type=?", - host.getHostId(), type.toString()); - } - - @Override - public void removeTag(HostInterface host, String tag) { - getJdbcTemplate().update( - "DELETE FROM host_tag WHERE pk_host=? AND str_tag=? AND b_constant=false", host.getHostId(), - tag); - } - - @Override - public void renameTag(HostInterface host, String oldTag, String newTag) { - getJdbcTemplate().update( - "UPDATE host_tag SET str_tag=? WHERE pk_host=? AND str_tag=? AND b_constant=false", newTag, - host.getHostId(), oldTag); - } - - @Override - public void updateThreadMode(HostInterface host, ThreadMode mode) { - getJdbcTemplate().update("UPDATE host SET int_thread_mode=? WHERE pk_host=?", mode.getNumber(), - host.getHostId()); - } - - @Override - public void updateHostOs(HostInterface host, String os) { - getJdbcTemplate().update("UPDATE host_stat SET str_os=? WHERE pk_host=?", os, host.getHostId()); - } - - @Override - public int getStrandedCoreUnits(HostInterface h) { - try { - int idle_cores = getJdbcTemplate().queryForObject( - "SELECT int_cores_idle FROM host WHERE pk_host = ? AND int_mem_idle <= ?", Integer.class, - h.getHostId(), Dispatcher.MEM_STRANDED_THRESHHOLD); - return (int) (Math.floor(idle_cores / 100.0)) * 100; - } catch (EmptyResultDataAccessException e) { - return 0; - } - } - - @Override - public int getStrandedGpus(HostInterface h) { - try { - int idle_gpus = getJdbcTemplate().queryForObject( - "SELECT int_gpus_idle FROM host WHERE pk_host = ?", Integer.class, h.getHostId()); - return idle_gpus; - } catch (EmptyResultDataAccessException e) { - return 0; - } - } - - private static final String IS_HOST_UP = "SELECT " + "COUNT(1) " + "FROM " + "host_stat " - + "WHERE " + "host_stat.str_state = ? " + "AND " + "host_stat.pk_host = ? 
"; - - @Override - public boolean isHostUp(HostInterface host) { - return getJdbcTemplate().queryForObject(IS_HOST_UP, Integer.class, HardwareState.UP.toString(), - host.getHostId()) == 1; - } - - private static final String IS_PREFER_SHOW = "SELECT " + "COUNT(1) " + "FROM " + "host," - + "owner," + "deed " + "WHERE " + "host.pk_host = deed.pk_host " + "AND " - + "deed.pk_owner = owner.pk_owner " + "AND " + "host.pk_host = ?"; - - @Override - public boolean isPreferShow(HostInterface h) { - return getJdbcTemplate().queryForObject(IS_PREFER_SHOW, Integer.class, h.getHostId()) > 0; - } - - @Override - public boolean isNimbyHost(HostInterface h) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE b_nimby=true AND pk_host=?", Integer.class, - h.getHostId()) > 0; - } - - /** - * Checks if the passed in name looks like a fully qualified domain name. If so, returns the - * hostname without the domain. Otherwise returns the passed in name unchanged. - * - * @param fqdn - String - * @return String - hostname - */ - private String getHostNameFromFQDN(String fqdn, Boolean useLongNames) { - String hostName; - Pattern ipPattern = Pattern.compile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"); - Matcher ipMatcher = ipPattern.matcher(fqdn); - if (ipMatcher.matches()) { - hostName = fqdn; - } else if (fqdn.contains(":")) { - // looks like IPv6 address. - hostName = fqdn; - } else if (useLongNames) { - hostName = fqdn; - Pattern domainPattern = - Pattern.compile(".*(\\.(.*)\\.(co(m|.[a-z]{2})|biz|edu|info|net|org|cn|de|eu|nl))$"); - Matcher domainMatcher = domainPattern.matcher(fqdn); - if (domainMatcher.matches()) { - hostName = fqdn.replace(domainMatcher.group(1), ""); - } - } else { - hostName = fqdn.split("\\.")[0]; - } - return hostName; - - } - - /** - * Converts the amount of memory reported by the machine to a modificed value which takes into - * account the operating system and the possibility of user applications. - * - * @param host - * @return - */ - private long convertMemoryUnits(RenderHost host) { - - long memUnits; - if (host.getTagsList().contains("64bit")) { - memUnits = CueUtil.convertKbToFakeKb64bit(env, host.getTotalMem()); - } else { - memUnits = CueUtil.convertKbToFakeKb32bit(env, host.getTotalMem()); - } - - /* - * If this is a desktop, we'll just cut the memory so we don't annoy the user. + @Override + public boolean isPreferShow(HostInterface h) { + return getJdbcTemplate().queryForObject(IS_PREFER_SHOW, Integer.class, h.getHostId()) > 0; + } + + @Override + public boolean isNimbyHost(HostInterface h) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host WHERE b_nimby=true AND pk_host=?", Integer.class, + h.getHostId()) > 0; + } + + /** + * Checks if the passed in name looks like a fully qualified domain name. If so, returns the + * hostname without the domain. Otherwise returns the passed in name unchanged. + * + * @param fqdn - String + * @return String - hostname */ - if (host.getNimbyEnabled()) { - long memReservedSystem = - env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); - memUnits = (long) (memUnits / 1.5) + memReservedSystem; + private String getHostNameFromFQDN(String fqdn, Boolean useLongNames) { + String hostName; + Pattern ipPattern = Pattern.compile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"); + Matcher ipMatcher = ipPattern.matcher(fqdn); + if (ipMatcher.matches()) { + hostName = fqdn; + } else if (fqdn.contains(":")) { + // looks like IPv6 address. 
+ hostName = fqdn; + } else if (useLongNames) { + hostName = fqdn; + Pattern domainPattern = Pattern + .compile(".*(\\.(.*)\\.(co(m|.[a-z]{2})|biz|edu|info|net|org|cn|de|eu|nl))$"); + Matcher domainMatcher = domainPattern.matcher(fqdn); + if (domainMatcher.matches()) { + hostName = fqdn.replace(domainMatcher.group(1), ""); + } + } else { + hostName = fqdn.split("\\.")[0]; + } + return hostName; + } - return memUnits; - } + /** + * Converts the amount of memory reported by the machine to a modificed value which takes into + * account the operating system and the possibility of user applications. + * + * @param host + * @return + */ + private long convertMemoryUnits(RenderHost host) { + + long memUnits; + if (host.getTagsList().contains("64bit")) { + memUnits = CueUtil.convertKbToFakeKb64bit(env, host.getTotalMem()); + } else { + memUnits = CueUtil.convertKbToFakeKb32bit(env, host.getTotalMem()); + } + + /* + * If this is a desktop, we'll just cut the memory so we don't annoy the user. + */ + if (host.getNimbyEnabled()) { + long memReservedSystem = + env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); + memUnits = (long) (memUnits / 1.5) + memReservedSystem; + } + + return memUnits; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java index 5838cef1f..361fc9883 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java @@ -56,760 +56,784 @@ import com.imageworks.spcue.util.SqlUtil; public class JobDaoJdbc extends JdbcDaoSupport implements JobDao { - private static final Pattern LAST_JOB_STRIP_PATTERN = Pattern.compile("_v*([_0-9]*$)"); - - /* - * Maps a row to a DispatchJob object - */ - public static final RowMapper DISPATCH_JOB_MAPPER = new RowMapper() { - public DispatchJob mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchJob job = new DispatchJob(); - job.id = rs.getString("pk_job"); - job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_facility"); - job.name = rs.getString("str_name"); - job.state = JobState.valueOf(rs.getString("str_state")); - job.maxRetries = rs.getInt("int_max_retries"); - job.paused = rs.getBoolean("b_paused"); - job.autoEat = rs.getBoolean("b_autoeat"); - job.autoBook = rs.getBoolean("b_auto_book"); - job.autoUnbook = rs.getBoolean("b_auto_unbook"); - return job; - } - }; - - /* - * Maps a row to minimal job. 
- */ - public static final RowMapper JOB_MAPPER = new RowMapper() { - public JobInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new JobInterface() { - final String jobid = rs.getString("pk_job"); - final String showid = rs.getString("pk_show"); - final String name = rs.getString("str_name"); - final String facility = rs.getString("pk_facility"); - - public String getJobId() { - return jobid; + private static final Pattern LAST_JOB_STRIP_PATTERN = Pattern.compile("_v*([_0-9]*$)"); + + /* + * Maps a row to a DispatchJob object + */ + public static final RowMapper DISPATCH_JOB_MAPPER = new RowMapper() { + public DispatchJob mapRow(ResultSet rs, int rowNum) throws SQLException { + DispatchJob job = new DispatchJob(); + job.id = rs.getString("pk_job"); + job.showId = rs.getString("pk_show"); + job.facilityId = rs.getString("pk_facility"); + job.name = rs.getString("str_name"); + job.state = JobState.valueOf(rs.getString("str_state")); + job.maxRetries = rs.getInt("int_max_retries"); + job.paused = rs.getBoolean("b_paused"); + job.autoEat = rs.getBoolean("b_autoeat"); + job.autoBook = rs.getBoolean("b_auto_book"); + job.autoUnbook = rs.getBoolean("b_auto_unbook"); + return job; + } + }; + + /* + * Maps a row to minimal job. + */ + public static final RowMapper JOB_MAPPER = new RowMapper() { + public JobInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { + return new JobInterface() { + final String jobid = rs.getString("pk_job"); + final String showid = rs.getString("pk_show"); + final String name = rs.getString("str_name"); + final String facility = rs.getString("pk_facility"); + + public String getJobId() { + return jobid; + } + + public String getShowId() { + return showid; + } + + public String getId() { + return jobid; + } + + public String getName() { + return name; + } + + public String getFacilityId() { + return facility; + } + }; + } + }; + + /* + * Maps a row to a JobDetail object + */ + private static final RowMapper JOB_DETAIL_MAPPER = new RowMapper() { + public JobDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + JobDetail job = new JobDetail(); + job.id = rs.getString("pk_job"); + job.showId = rs.getString("pk_show"); + job.facilityId = rs.getString("pk_facility"); + job.deptId = rs.getString("pk_dept"); + job.groupId = rs.getString("pk_folder"); + job.logDir = rs.getString("str_log_dir"); + job.maxCoreUnits = rs.getInt("int_max_cores"); + job.minCoreUnits = rs.getInt("int_min_cores"); + job.maxGpuUnits = rs.getInt("int_max_gpus"); + job.minGpuUnits = rs.getInt("int_min_gpus"); + job.name = rs.getString("str_name"); + job.priority = rs.getInt("int_priority"); + job.shot = rs.getString("str_shot"); + job.state = JobState.valueOf(rs.getString("str_state")); + int uid = rs.getInt("int_uid"); + job.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); + job.user = rs.getString("str_user"); + job.email = rs.getString("str_email"); + job.totalFrames = rs.getInt("int_frame_count"); + job.totalLayers = rs.getInt("int_layer_count"); + Timestamp startTime = rs.getTimestamp("ts_started"); + job.startTime = startTime != null ? (int) (startTime.getTime() / 1000) : 0; + Timestamp stopTime = rs.getTimestamp("ts_stopped"); + job.stopTime = stopTime != null ? 
(int) (stopTime.getTime() / 1000) : 0; + job.isPaused = rs.getBoolean("b_paused"); + job.maxRetries = rs.getInt("int_max_retries"); + job.showName = rs.getString("show_name"); + job.facilityName = rs.getString("facility_name"); + job.deptName = rs.getString("dept_name"); + return job; + } + }; + + private static final String GET_DISPATCH_JOB = "SELECT " + "job.pk_job, " + "job.pk_facility, " + + "job.pk_show, " + "job.str_name, " + "job.str_show, " + "job.str_state, " + + "job.b_paused, " + "job.int_max_retries, " + "job.b_autoeat, " + "job.b_auto_book," + + "job.b_auto_unbook " + "FROM " + "job " + "WHERE " + "pk_job = ?"; + + @Override + public DispatchJob getDispatchJob(String uuid) { + return getJdbcTemplate().queryForObject(GET_DISPATCH_JOB, DISPATCH_JOB_MAPPER, uuid); + } + + private static final String IS_JOB_COMPLETE = "SELECT " + "SUM (" + "int_waiting_count + " + + "int_running_count + " + "int_dead_count + " + "int_depend_count + " + + "int_checkpoint_count " + ") " + "FROM " + "job_stat " + "WHERE " + "pk_job=?"; + + @Override + public boolean isJobComplete(JobInterface job) { + if (isLaunching(job)) { + return false; + } + return getJdbcTemplate().queryForObject(IS_JOB_COMPLETE, Integer.class, + job.getJobId()) == 0; + } + + public static final String GET_JOB = "SELECT " + "job.pk_job, " + "job.pk_show, " + + "job.pk_dept," + "job.pk_facility," + "job.str_name " + "FROM " + "job "; + + private static final String GET_JOB_DETAIL = "SELECT " + "job.pk_job," + "job.pk_show," + + "job.pk_facility," + "job.pk_dept," + "job.pk_folder," + "job.str_log_dir," + + "job.str_name," + "job.str_shot," + "job.str_state," + "job.int_uid," + + "job.str_user," + "job.str_email," + "job.int_frame_count," + "job.int_layer_count," + + "job.ts_started," + "job.ts_stopped," + "job.b_paused," + "job.int_max_retries," + + "job_resource.int_max_cores," + "job_resource.int_min_cores," + + "job_resource.int_max_gpus," + "job_resource.int_min_gpus," + + "job_resource.int_priority," + "show.str_name AS show_name, " + + "dept.str_name AS dept_name, " + "facility.str_name AS facility_name " + "FROM " + + "job, " + "job_resource, " + "show, " + "dept, " + "facility " + "WHERE " + + "job.pk_job = job_resource.pk_job " + "AND " + "job.pk_show = show.pk_show " + "AND " + + "job.pk_dept = dept.pk_dept " + "AND " + "job.pk_facility = facility.pk_facility "; + + private static final String GET_JOB_BY_ID = GET_JOB_DETAIL + "AND job.pk_job=?"; + + private static final String FIND_JOB_BY_NAME = GET_JOB_DETAIL + "AND job.str_visible_name=? "; + + @Override + public JobDetail getJobDetail(String id) { + return getJdbcTemplate().queryForObject(GET_JOB_BY_ID, JOB_DETAIL_MAPPER, id); + } + + @Override + public JobDetail findLastJob(String name) { + Matcher matcher = LAST_JOB_STRIP_PATTERN.matcher(name); + name = matcher.replaceAll("%"); + + return getJdbcTemplate().queryForObject( + GET_JOB_DETAIL + " AND job.str_state = 'FINISHED' AND job.str_name LIKE ? " + + "ORDER BY job.ts_stopped LIMIT 1", + JOB_DETAIL_MAPPER, name); + } + + @Override + public JobInterface getJob(String id) { + return getJdbcTemplate().queryForObject(GET_JOB + " WHERE pk_job=?", JOB_MAPPER, id); + } + + public static final String GET_JOBS_BY_TASK = "SELECT " + "job.pk_job, " + "job.pk_show, " + + "job.pk_dept, " + "job.pk_facility, " + "job.str_name " + "FROM " + "job," + "folder " + + "WHERE " + "job.pk_folder = folder.pk_folder " + "AND " + + "folder.b_exclude_managed = false " + "AND " + "job.str_state = ? " + "AND " + + "job.pk_dept = ? 
" + "AND " + "job.str_shot = ? " + "ORDER BY " + "ts_started ASC "; + + @Override + public List getJobs(TaskEntity t) { + return getJdbcTemplate().query(GET_JOBS_BY_TASK, JOB_MAPPER, JobState.PENDING.toString(), + t.deptId, t.shot); + } + + @Override + public JobDetail findJobDetail(String name) { + return getJdbcTemplate().queryForObject(FIND_JOB_BY_NAME, JOB_DETAIL_MAPPER, name); + } + + @Override + public JobInterface findJob(String name) { + return getJdbcTemplate().queryForObject(GET_JOB + " WHERE job.str_visible_name=?", + JOB_MAPPER, name); + } + + @Override + public List findJobs(ShowInterface show) { + return getJdbcTemplate().query(GET_JOB_DETAIL + " AND job.pk_show=?", JOB_DETAIL_MAPPER, + show.getShowId()); + } + + @Override + public List findJobs(GroupInterface group) { + return getJdbcTemplate().query(GET_JOB_DETAIL + " AND job.pk_folder=?", JOB_DETAIL_MAPPER, + group.getId()); + } + + @Override + public void deleteJob(JobInterface j) { + /* See trigger before_delete_job */ + getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", j.getId()); + } + + @Override + public void updatePriority(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updatePriority(GroupInterface g, int v) { + getJdbcTemplate().update( + "UPDATE job_resource SET int_priority=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE job.pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMinCores(GroupInterface g, int v) { + getJdbcTemplate().update( + "UPDATE job_resource SET int_min_cores=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMaxCores(GroupInterface g, int v) { + getJdbcTemplate().update( + "UPDATE job_resource SET int_max_cores=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMinCores(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updateMaxCores(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updateMinGpus(GroupInterface g, int v) { + getJdbcTemplate().update( + "UPDATE job_resource SET int_min_gpus=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMaxGpus(GroupInterface g, int v) { + getJdbcTemplate().update( + "UPDATE job_resource SET int_max_gpus=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMinGpus(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updateMaxGpus(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE pk_job=?", v, + j.getJobId()); + } + + @Override + public void updatePaused(JobInterface j, boolean b) { + getJdbcTemplate().update("UPDATE job SET b_paused=? WHERE pk_job=?", b, j.getJobId()); + } + + @Override + public void updateAutoEat(JobInterface j, boolean b) { + int maxRetries = 1; + if (b) { + maxRetries = 0; } + getJdbcTemplate().update("UPDATE job SET b_autoeat=?, int_max_retries=? 
WHERE pk_job=?", b, + maxRetries, j.getJobId()); + } + + @Override + public void updateState(JobInterface job, JobState state) { + getJdbcTemplate().update("UPDATE job SET str_state=? WHERE pk_job=?", state.toString(), + job.getJobId()); + } + + @Override + public void updateLogPath(JobInterface job, String path) { + getJdbcTemplate().update("UPDATE job SET str_log_dir=? WHERE pk_job=?", path, + job.getJobId()); + } - public String getShowId() { - return showid; + @Override + public void updateMaxRSS(JobInterface job, long value) { + getJdbcTemplate().update( + "UPDATE job_mem SET int_max_rss=? WHERE pk_job=? AND int_max_rss < ?", value, + job.getJobId(), value); + } + + private static final String UPDATE_JOB_FINISHED = "UPDATE " + "job " + "SET " + + "str_state = ?, " + "str_visible_name = NULL, " + "ts_stopped = current_timestamp " + + "WHERE " + "str_state = 'PENDING' " + "AND " + "pk_job = ?"; + + @Override + public boolean updateJobFinished(JobInterface job) { + // Only return true if this thread was the one who actually + // set the job state to finished. + if (getJdbcTemplate().update(UPDATE_JOB_FINISHED, JobState.FINISHED.toString(), + job.getJobId()) == 1) { + return true; } + return false; + } - public String getId() { - return jobid; + private static final String INSERT_JOB = "INSERT INTO " + "job " + "(" + "pk_job," + "pk_show," + + "pk_folder," + "pk_facility," + "pk_dept," + "str_name," + "str_visible_name," + + "str_show," + "str_shot," + "str_user," + "str_email," + "str_state," + "str_log_dir," + + "str_os, " + "int_uid," + "b_paused," + "b_autoeat," + "int_max_retries " + ") " + + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertJob(JobDetail j, JobLogUtil jobLogUtil) { + j.id = SqlUtil.genKeyRandom(); + j.logDir = jobLogUtil.getJobLogPath(j); + if (j.minCoreUnits < 100) { + j.minCoreUnits = 100; } - public String getName() { - return name; + getJdbcTemplate().update(INSERT_JOB, j.id, j.showId, j.groupId, j.facilityId, j.deptId, + j.name, j.name, j.showName, j.shot, j.user, j.email, j.state.toString(), j.logDir, + j.os, j.uid.orElse(null), j.isPaused, j.isAutoEat, j.maxRetries); + } + + private static final String JOB_EXISTS = "SELECT " + "1 " + "FROM " + "job " + "WHERE " + + "str_name = ? " + "AND " + "str_state='PENDING' " + "LIMIT 1"; + + @Override + public boolean exists(String name) { + try { + return (getJdbcTemplate().queryForObject(JOB_EXISTS, Integer.class, name) >= 1); + } catch (Exception e) { + return false; } + } + + private static final String IS_LAUNCHING = + "SELECT " + "str_state " + "FROM " + "job " + "WHERE " + "pk_job=?"; + + @Override + public boolean isLaunching(JobInterface j) { + return getJdbcTemplate().queryForObject(IS_LAUNCHING, String.class, j.getJobId()) + .equals(JobState.STARTUP.toString()); + } - public String getFacilityId() { - return facility; + @Override + public void activateJob(JobInterface job, JobState jobState) { + + Long[] jobTotals = {0L, 0L}; // Depend, Waiting + + /* + * Sets all frames in the setup state to Waiting. Frames with a depend count > 0 are + * automatically updated to Depend via the update_frame_wait_to_dep trigger. + */ + getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_job=? AND str_state=?", + FrameState.WAITING.toString(), job.getId(), FrameState.SETUP.toString()); + + List> layers = getJdbcTemplate() + .queryForList("SELECT pk_layer, str_state, count(1) AS c FROM frame " + + "WHERE pk_job=? 
GROUP BY pk_layer, str_state", job.getId()); + + for (Map row : layers) { + String layer = (String) row.get("pk_layer"); + FrameState state = FrameState.valueOf((String) row.get("str_state")); + Long count = (Long) row.get("c"); + + if (count == 0 || state == null) { + continue; + } + + switch (state) { + case DEPEND: + jobTotals[0] = jobTotals[0] + count; + getJdbcTemplate().update( + "UPDATE layer_stat SET int_depend_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", + count, count, layer); + break; + case WAITING: + jobTotals[1] = jobTotals[1] + count; + getJdbcTemplate().update( + "UPDATE layer_stat SET int_waiting_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", + count, count, layer); + break; + } } - }; - } - }; - - /* - * Maps a row to a JobDetail object - */ - private static final RowMapper JOB_DETAIL_MAPPER = new RowMapper() { - public JobDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - JobDetail job = new JobDetail(); - job.id = rs.getString("pk_job"); - job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_facility"); - job.deptId = rs.getString("pk_dept"); - job.groupId = rs.getString("pk_folder"); - job.logDir = rs.getString("str_log_dir"); - job.maxCoreUnits = rs.getInt("int_max_cores"); - job.minCoreUnits = rs.getInt("int_min_cores"); - job.maxGpuUnits = rs.getInt("int_max_gpus"); - job.minGpuUnits = rs.getInt("int_min_gpus"); - job.name = rs.getString("str_name"); - job.priority = rs.getInt("int_priority"); - job.shot = rs.getString("str_shot"); - job.state = JobState.valueOf(rs.getString("str_state")); - int uid = rs.getInt("int_uid"); - job.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); - job.user = rs.getString("str_user"); - job.email = rs.getString("str_email"); - job.totalFrames = rs.getInt("int_frame_count"); - job.totalLayers = rs.getInt("int_layer_count"); - Timestamp startTime = rs.getTimestamp("ts_started"); - job.startTime = startTime != null ? (int) (startTime.getTime() / 1000) : 0; - Timestamp stopTime = rs.getTimestamp("ts_stopped"); - job.stopTime = stopTime != null ? 
(int) (stopTime.getTime() / 1000) : 0; - job.isPaused = rs.getBoolean("b_paused"); - job.maxRetries = rs.getInt("int_max_retries"); - job.showName = rs.getString("show_name"); - job.facilityName = rs.getString("facility_name"); - job.deptName = rs.getString("dept_name"); - return job; - } - }; - - private static final String GET_DISPATCH_JOB = "SELECT " + "job.pk_job, " + "job.pk_facility, " - + "job.pk_show, " + "job.str_name, " + "job.str_show, " + "job.str_state, " + "job.b_paused, " - + "job.int_max_retries, " + "job.b_autoeat, " + "job.b_auto_book," + "job.b_auto_unbook " - + "FROM " + "job " + "WHERE " + "pk_job = ?"; - - @Override - public DispatchJob getDispatchJob(String uuid) { - return getJdbcTemplate().queryForObject(GET_DISPATCH_JOB, DISPATCH_JOB_MAPPER, uuid); - } - - private static final String IS_JOB_COMPLETE = "SELECT " + "SUM (" + "int_waiting_count + " - + "int_running_count + " + "int_dead_count + " + "int_depend_count + " - + "int_checkpoint_count " + ") " + "FROM " + "job_stat " + "WHERE " + "pk_job=?"; - - @Override - public boolean isJobComplete(JobInterface job) { - if (isLaunching(job)) { - return false; - } - return getJdbcTemplate().queryForObject(IS_JOB_COMPLETE, Integer.class, job.getJobId()) == 0; - } - - public static final String GET_JOB = "SELECT " + "job.pk_job, " + "job.pk_show, " + "job.pk_dept," - + "job.pk_facility," + "job.str_name " + "FROM " + "job "; - - private static final String GET_JOB_DETAIL = "SELECT " + "job.pk_job," + "job.pk_show," - + "job.pk_facility," + "job.pk_dept," + "job.pk_folder," + "job.str_log_dir," - + "job.str_name," + "job.str_shot," + "job.str_state," + "job.int_uid," + "job.str_user," - + "job.str_email," + "job.int_frame_count," + "job.int_layer_count," + "job.ts_started," - + "job.ts_stopped," + "job.b_paused," + "job.int_max_retries," + "job_resource.int_max_cores," - + "job_resource.int_min_cores," + "job_resource.int_max_gpus," + "job_resource.int_min_gpus," - + "job_resource.int_priority," + "show.str_name AS show_name, " - + "dept.str_name AS dept_name, " + "facility.str_name AS facility_name " + "FROM " + "job, " - + "job_resource, " + "show, " + "dept, " + "facility " + "WHERE " - + "job.pk_job = job_resource.pk_job " + "AND " + "job.pk_show = show.pk_show " + "AND " - + "job.pk_dept = dept.pk_dept " + "AND " + "job.pk_facility = facility.pk_facility "; - - private static final String GET_JOB_BY_ID = GET_JOB_DETAIL + "AND job.pk_job=?"; - - private static final String FIND_JOB_BY_NAME = GET_JOB_DETAIL + "AND job.str_visible_name=? "; - - @Override - public JobDetail getJobDetail(String id) { - return getJdbcTemplate().queryForObject(GET_JOB_BY_ID, JOB_DETAIL_MAPPER, id); - } - - @Override - public JobDetail findLastJob(String name) { - Matcher matcher = LAST_JOB_STRIP_PATTERN.matcher(name); - name = matcher.replaceAll("%"); - - return getJdbcTemplate() - .queryForObject(GET_JOB_DETAIL + " AND job.str_state = 'FINISHED' AND job.str_name LIKE ? " - + "ORDER BY job.ts_stopped LIMIT 1", JOB_DETAIL_MAPPER, name); - } - - @Override - public JobInterface getJob(String id) { - return getJdbcTemplate().queryForObject(GET_JOB + " WHERE pk_job=?", JOB_MAPPER, id); - } - - public static final String GET_JOBS_BY_TASK = "SELECT " + "job.pk_job, " + "job.pk_show, " - + "job.pk_dept, " + "job.pk_facility, " + "job.str_name " + "FROM " + "job," + "folder " - + "WHERE " + "job.pk_folder = folder.pk_folder " + "AND " - + "folder.b_exclude_managed = false " + "AND " + "job.str_state = ? " + "AND " - + "job.pk_dept = ? 
" + "AND " + "job.str_shot = ? " + "ORDER BY " + "ts_started ASC "; - - @Override - public List getJobs(TaskEntity t) { - return getJdbcTemplate().query(GET_JOBS_BY_TASK, JOB_MAPPER, JobState.PENDING.toString(), - t.deptId, t.shot); - } - - @Override - public JobDetail findJobDetail(String name) { - return getJdbcTemplate().queryForObject(FIND_JOB_BY_NAME, JOB_DETAIL_MAPPER, name); - } - - @Override - public JobInterface findJob(String name) { - return getJdbcTemplate().queryForObject(GET_JOB + " WHERE job.str_visible_name=?", JOB_MAPPER, - name); - } - - @Override - public List findJobs(ShowInterface show) { - return getJdbcTemplate().query(GET_JOB_DETAIL + " AND job.pk_show=?", JOB_DETAIL_MAPPER, - show.getShowId()); - } - - @Override - public List findJobs(GroupInterface group) { - return getJdbcTemplate().query(GET_JOB_DETAIL + " AND job.pk_folder=?", JOB_DETAIL_MAPPER, - group.getId()); - } - - @Override - public void deleteJob(JobInterface j) { - /* See trigger before_delete_job */ - getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", j.getId()); - } - - @Override - public void updatePriority(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE pk_job=?", v, - j.getJobId()); - } - - @Override - public void updatePriority(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE " - + "pk_job IN (SELECT pk_job FROM job WHERE job.pk_folder=?)", v, g.getGroupId()); - } - - @Override - public void updateMinCores(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE " - + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); - } - - @Override - public void updateMaxCores(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE " - + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); - } - - @Override - public void updateMinCores(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE pk_job=?", v, - j.getJobId()); - } - - @Override - public void updateMaxCores(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE pk_job=?", v, - j.getJobId()); - } - - @Override - public void updateMinGpus(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE " - + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); - } - - @Override - public void updateMaxGpus(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE " - + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", v, g.getGroupId()); - } - - @Override - public void updateMinGpus(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE pk_job=?", v, - j.getJobId()); - } - - @Override - public void updateMaxGpus(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE pk_job=?", v, - j.getJobId()); - } - - @Override - public void updatePaused(JobInterface j, boolean b) { - getJdbcTemplate().update("UPDATE job SET b_paused=? WHERE pk_job=?", b, j.getJobId()); - } - - @Override - public void updateAutoEat(JobInterface j, boolean b) { - int maxRetries = 1; - if (b) { - maxRetries = 0; - } - getJdbcTemplate().update("UPDATE job SET b_autoeat=?, int_max_retries=? 
WHERE pk_job=?", b, - maxRetries, j.getJobId()); - } - - @Override - public void updateState(JobInterface job, JobState state) { - getJdbcTemplate().update("UPDATE job SET str_state=? WHERE pk_job=?", state.toString(), - job.getJobId()); - } - - @Override - public void updateLogPath(JobInterface job, String path) { - getJdbcTemplate().update("UPDATE job SET str_log_dir=? WHERE pk_job=?", path, job.getJobId()); - } - - @Override - public void updateMaxRSS(JobInterface job, long value) { - getJdbcTemplate().update("UPDATE job_mem SET int_max_rss=? WHERE pk_job=? AND int_max_rss < ?", - value, job.getJobId(), value); - } - - private static final String UPDATE_JOB_FINISHED = "UPDATE " + "job " + "SET " + "str_state = ?, " - + "str_visible_name = NULL, " + "ts_stopped = current_timestamp " + "WHERE " - + "str_state = 'PENDING' " + "AND " + "pk_job = ?"; - - @Override - public boolean updateJobFinished(JobInterface job) { - // Only return true if this thread was the one who actually - // set the job state to finished. - if (getJdbcTemplate().update(UPDATE_JOB_FINISHED, JobState.FINISHED.toString(), - job.getJobId()) == 1) { - return true; - } - return false; - } - - private static final String INSERT_JOB = "INSERT INTO " + "job " + "(" + "pk_job," + "pk_show," - + "pk_folder," + "pk_facility," + "pk_dept," + "str_name," + "str_visible_name," + "str_show," - + "str_shot," + "str_user," + "str_email," + "str_state," + "str_log_dir," + "str_os, " - + "int_uid," + "b_paused," + "b_autoeat," + "int_max_retries " + ") " - + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertJob(JobDetail j, JobLogUtil jobLogUtil) { - j.id = SqlUtil.genKeyRandom(); - j.logDir = jobLogUtil.getJobLogPath(j); - if (j.minCoreUnits < 100) { - j.minCoreUnits = 100; - } - - getJdbcTemplate().update(INSERT_JOB, j.id, j.showId, j.groupId, j.facilityId, j.deptId, j.name, - j.name, j.showName, j.shot, j.user, j.email, j.state.toString(), j.logDir, j.os, - j.uid.orElse(null), j.isPaused, j.isAutoEat, j.maxRetries); - } - - private static final String JOB_EXISTS = "SELECT " + "1 " + "FROM " + "job " + "WHERE " - + "str_name = ? " + "AND " + "str_state='PENDING' " + "LIMIT 1"; - - @Override - public boolean exists(String name) { - try { - return (getJdbcTemplate().queryForObject(JOB_EXISTS, Integer.class, name) >= 1); - } catch (Exception e) { - return false; - } - } - - private static final String IS_LAUNCHING = - "SELECT " + "str_state " + "FROM " + "job " + "WHERE " + "pk_job=?"; - - @Override - public boolean isLaunching(JobInterface j) { - return getJdbcTemplate().queryForObject(IS_LAUNCHING, String.class, j.getJobId()) - .equals(JobState.STARTUP.toString()); - } - - @Override - public void activateJob(JobInterface job, JobState jobState) { - - Long[] jobTotals = {0L, 0L}; // Depend, Waiting - /* - * Sets all frames in the setup state to Waiting. Frames with a depend count > 0 are - * automatically updated to Depend via the update_frame_wait_to_dep trigger. - */ - getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_job=? AND str_state=?", - FrameState.WAITING.toString(), job.getId(), FrameState.SETUP.toString()); - - List> layers = - getJdbcTemplate().queryForList("SELECT pk_layer, str_state, count(1) AS c FROM frame " - + "WHERE pk_job=? 
GROUP BY pk_layer, str_state", job.getId()); - - for (Map row : layers) { - String layer = (String) row.get("pk_layer"); - FrameState state = FrameState.valueOf((String) row.get("str_state")); - Long count = (Long) row.get("c"); - - if (count == 0 || state == null) { - continue; - } - - switch (state) { - case DEPEND: - jobTotals[0] = jobTotals[0] + count; - getJdbcTemplate().update( - "UPDATE layer_stat SET int_depend_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", - count, count, layer); - break; - case WAITING: - jobTotals[1] = jobTotals[1] + count; - getJdbcTemplate().update( - "UPDATE layer_stat SET int_waiting_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", - count, count, layer); - break; - } - } - - getJdbcTemplate().update( - "UPDATE job_stat SET int_depend_count=?,int_waiting_count=? WHERE pk_job=?", jobTotals[0], - jobTotals[1], job.getJobId()); - - getJdbcTemplate().update("UPDATE job SET int_frame_count=?, int_layer_count=? WHERE pk_job=?", - jobTotals[0] + jobTotals[1], layers.size(), job.getJobId()); - - getJdbcTemplate().update( - "UPDATE show_stats SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", - jobTotals[0] + jobTotals[1], job.getShowId()); - - updateState(job, jobState); - } - - private static final String HAS_PENDING_FRAMES = "SELECT " + "int_waiting_count " + "FROM " - + "job," + "job_stat " + "WHERE " + "job.pk_job = job_stat.pk_job " + "AND " - + "job.str_state = 'PENDING' " + "AND " + "job.b_paused = false " + "AND " - + "job.b_auto_book = true " + "AND " + "job.pk_job = ?"; - - @Override - public boolean hasPendingFrames(JobInterface job) { - try { - return getJdbcTemplate().queryForObject(HAS_PENDING_FRAMES, Integer.class, - job.getJobId()) > 0; - } catch (DataAccessException e) { - return false; - } - } - - private static final String IS_JOB_OVER_MIN_CORES = - "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " - + "AND " + "job_resource.int_cores > job_resource.int_min_cores"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MIN_CORES, Integer.class, - job.getJobId()) > 0; - } - - private static final String IS_JOB_OVER_MAX_CORES = - "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " - + "AND " + "job_resource.int_cores + ? > job_resource.int_max_cores"; - - @Override - public boolean isOverMaxCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, Integer.class, job.getJobId(), - 0) > 0; - } - - @Override - public boolean isOverMaxCores(JobInterface job, int coreUnits) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, Integer.class, job.getJobId(), - coreUnits) > 0; - } - - private static final String IS_JOB_AT_MAX_CORES = - "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " - + "AND " + "job_resource.int_cores >= job_resource.int_max_cores "; - - @Override - public boolean isAtMaxCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_CORES, Integer.class, job.getJobId()) > 0; - } - - private static final String IS_JOB_OVER_MAX_GPUS = - "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " - + "AND " + "job_resource.int_gpus + ? 
> job_resource.int_max_gpus"; - - @Override - public boolean isOverMaxGpus(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, Integer.class, job.getJobId(), - 0) > 0; - } - - @Override - public boolean isOverMaxGpus(JobInterface job, int gpu) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, Integer.class, job.getJobId(), - gpu) > 0; - } - - private static final String IS_JOB_AT_MAX_GPUS = - "SELECT " + "COUNT(1) " + "FROM " + "job_resource " + "WHERE " + "job_resource.pk_job = ? " - + "AND " + "job_resource.int_gpus >= job_resource.int_max_gpus "; - - @Override - public boolean isAtMaxGpus(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_GPUS, Integer.class, job.getJobId()) > 0; - } - - @Override - public void updateMaxFrameRetries(JobInterface j, int max_retries) { - if (max_retries < 0) { - throw new IllegalArgumentException("max retries must be greater than 0"); - } - - int max_max_retries = getJdbcTemplate().queryForObject( - "SELECT int_value FROM config WHERE str_key=?", Integer.class, "MAX_FRAME_RETRIES"); - - if (max_retries > max_max_retries) { - throw new IllegalArgumentException("max retries must be less than " + max_max_retries); - } - - getJdbcTemplate().update("UPDATE job SET int_max_retries=? WHERE pk_job=?", max_retries, - j.getJobId()); - } - - private static final String GET_FRAME_STATE_TOTALS = - "SELECT " + "job.int_frame_count," + "job_stat.* " + "FROM " + "job," + "job_stat " + "WHERE " - + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job=?"; - - public FrameStateTotals getFrameStateTotals(JobInterface job) { - return getJdbcTemplate().queryForObject(GET_FRAME_STATE_TOTALS, - new RowMapper() { - public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameStateTotals t = new FrameStateTotals(); - t.dead = rs.getInt("int_dead_count"); - t.depend = rs.getInt("int_depend_count"); - t.eaten = rs.getInt("int_eaten_count"); - t.running = rs.getInt("int_running_count"); - t.succeeded = rs.getInt("int_succeeded_count"); - t.waiting = rs.getInt("int_waiting_count"); - t.total = rs.getInt("int_frame_count"); - return t; - } - }, job.getJobId()); - } - - private static final String GET_EXECUTION_SUMMARY = "SELECT " + "job_usage.int_core_time_success," - + "job_usage.int_core_time_fail," + "job_usage.int_gpu_time_success," - + "job_usage.int_gpu_time_fail," + "job_mem.int_max_rss " + "FROM " + "job," + "job_usage, " - + "job_mem " + "WHERE " + "job.pk_job = job_usage.pk_job " + "AND " - + "job.pk_job = job_mem.pk_job " + "AND " + "job.pk_job = ?"; - - public ExecutionSummary getExecutionSummary(JobInterface job) { - return getJdbcTemplate().queryForObject(GET_EXECUTION_SUMMARY, - new RowMapper() { - public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { - ExecutionSummary e = new ExecutionSummary(); - e.coreTimeSuccess = rs.getLong("int_core_time_success"); - e.coreTimeFail = rs.getLong("int_core_time_fail"); - e.coreTime = e.coreTimeSuccess + e.coreTimeFail; - e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); - e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); - e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; - e.highMemoryKb = rs.getLong("int_max_rss"); - - return e; - } - }, job.getJobId()); - } - - private static final String INSERT_JOB_ENV = "INSERT INTO " + "job_env " + "(" - + "pk_job_env, pk_job, str_key, str_value " + ") " + "VALUES (?,?,?,?)"; - - @Override - public void insertEnvironment(JobInterface job, Map env) { - for (Map.Entry e : 
env.entrySet()) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_ENV, pk, job.getJobId(), e.getKey(), e.getValue()); - } - } - - @Override - public void insertEnvironment(JobInterface job, String key, String value) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_ENV, pk, job.getJobId(), key, value); - } - - @Override - public Map getEnvironment(JobInterface job) { - Map result = new HashMap(); - - List> _result = getJdbcTemplate() - .queryForList("SELECT str_key, str_value FROM job_env WHERE pk_job=?", job.getJobId()); - - for (Map o : _result) { - result.put((String) o.get("str_key"), (String) o.get("str_value")); - } - - return result; - } - - @Override - public void updateParent(JobInterface job, GroupDetail dest) { - updateParent(job, dest, new Inherit[] {Inherit.All}); - } - - @Override - public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) { - - if (!job.getShowId().equals(dest.getShowId())) { - throw new EntityModificationError("error moving job, " + "cannot move jobs between shows"); - } - - StringBuilder query = new StringBuilder(1024); - query.append("UPDATE job_resource SET "); - List values = new ArrayList(); - - Set inheritSet = new HashSet(inherits.length); - inheritSet.addAll(Arrays.asList(inherits)); - - for (Inherit i : inheritSet) { - switch (i) { - case Priority: - if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { - query.append("int_priority=?,"); - values.add(dest.jobPriority); - } - break; - - case MinCores: - if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { - query.append("int_min_cores=?,"); - values.add(dest.jobMinCores); - } - break; - - case MaxCores: - if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { - query.append("int_max_cores=?,"); - values.add(dest.jobMaxCores); - } - break; - - case MinGpus: - if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_min_gpus=?,"); - values.add(dest.jobMinGpus); - } - break; - - case MaxGpus: - if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_max_gpus=?,"); - values.add(dest.jobMaxGpus); - } - break; - - case All: - if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { - query.append("int_priority=?,"); - values.add(dest.jobPriority); - } - - if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { - query.append("int_min_cores=?,"); - values.add(dest.jobMinCores); - } - - if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { - query.append("int_max_cores=?,"); - values.add(dest.jobMaxCores); - } - - if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_min_gpus=?,"); - values.add(dest.jobMinGpus); - } - - if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { - query.append("int_max_gpus=?,"); - values.add(dest.jobMaxGpus); - } - break; - } - } - - getJdbcTemplate().update("UPDATE job SET pk_folder=?, pk_dept=? WHERE pk_job=?", - dest.getGroupId(), dest.getDepartmentId(), job.getJobId()); - - getJdbcTemplate().update("UPDATE job_history SET pk_dept=? 
WHERE pk_job=?", - dest.getDepartmentId(), job.getJobId()); - - if (values.size() > 0) { - query.deleteCharAt(query.length() - 1); - query.append(" WHERE pk_job=?"); - values.add(job.getJobId()); - getJdbcTemplate().update(query.toString(), values.toArray()); - } - } - - private static final String HAS_PENDING_JOBS = - "SELECT " + "job.pk_job " + "FROM " + "job, " + "job_stat, " + "job_resource " + "WHERE " - + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job = job_resource.pk_job " + "AND " - + "job.str_state = 'PENDING' " + "AND " + "job.b_paused = false " + "AND " - + "job.b_auto_book = true " + "AND " + "job_stat.int_waiting_count != 0 " + "AND " - + "job_resource.int_cores < job_resource.int_max_cores " + "AND " - + "job_resource.int_gpus < job_resource.int_max_gpus " + "AND " + "job.pk_facility = ? " - + "LIMIT 1"; - - @Override - public boolean cueHasPendingJobs(FacilityInterface f) { - return getJdbcTemplate().queryForList(HAS_PENDING_JOBS, f.getFacilityId()).size() > 0; - } - - @Override - public void enableAutoBooking(JobInterface job, boolean value) { - getJdbcTemplate().update("UPDATE job SET b_auto_book=? WHERE pk_job=?", value, job.getJobId()); - } - - @Override - public void enableAutoUnBooking(JobInterface job, boolean value) { - getJdbcTemplate().update("UPDATE job SET b_auto_unbook=? WHERE pk_job=?", value, - job.getJobId()); - } - - public static final String MAP_POST_JOB = - "INSERT INTO " + "job_post " + "(pk_job_post, pk_job, pk_post_job) " + "VALUES (?,?,?)"; - - @Override - public void mapPostJob(BuildableJob job) { - getJdbcTemplate().update(MAP_POST_JOB, SqlUtil.genKeyRandom(), job.detail.id, - job.getPostJob().detail.id); - } - - public static final String ACTIVATE_POST_JOB = "UPDATE " + "job " + "SET " + "str_state=? " - + "WHERE " + "pk_job IN (SELECT pk_post_job FROM job_post WHERE pk_job = ?)"; - - @Override - public void activatePostJob(JobInterface job) { - getJdbcTemplate().update(ACTIVATE_POST_JOB, JobState.PENDING.toString(), job.getJobId()); - getJdbcTemplate().update("DELETE FROM job_post WHERE pk_job=?", job.getJobId()); - } - - @Override - public void updateDepartment(GroupInterface group, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_folder=?", dept.getDepartmentId(), - group.getGroupId()); - } - - @Override - public void updateDepartment(JobInterface job, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_job=?", dept.getDepartmentId(), - job.getJobId()); - } - - public void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus) { - - if (exitStatus == 0) { - - getJdbcTemplate().update( - "UPDATE " + "job_usage " + "SET " + "int_core_time_success = int_core_time_success + ?," - + "int_gpu_time_success = int_gpu_time_success + ?," - + "int_clock_time_success = int_clock_time_success + ?," - + "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + "pk_job = ? ", - usage.getCoreTimeSeconds(), usage.getGpuTimeSeconds(), usage.getClockTimeSeconds(), - job.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + "job_usage " + "SET " + "int_clock_time_high = ? " + "WHERE " + "pk_job = ? 
" - + "AND " + "int_clock_time_high < ?", - usage.getClockTimeSeconds(), job.getJobId(), usage.getClockTimeSeconds()); - } else { - - getJdbcTemplate().update( - "UPDATE " + "job_usage " + "SET " + "int_core_time_fail = int_core_time_fail + ?," - + "int_clock_time_fail = int_clock_time_fail + ?," - + "int_frame_fail_count = int_frame_fail_count + 1 " + "WHERE " + "pk_job = ? ", - usage.getCoreTimeSeconds(), usage.getClockTimeSeconds(), job.getJobId()); - } - } - - public void updateEmail(JobInterface job, String email) { - getJdbcTemplate().update("UPDATE job SET str_email=? WHERE pk_job=?", email, job.getJobId()); - } - - public String getEmail(JobInterface job) { - String jobId = job.getJobId(); - return getJdbcTemplate().queryForObject("SELECT str_email FROM job WHERE pk_job = ?", - String.class, jobId); - } + getJdbcTemplate().update( + "UPDATE job_stat SET int_depend_count=?,int_waiting_count=? WHERE pk_job=?", + jobTotals[0], jobTotals[1], job.getJobId()); + + getJdbcTemplate().update( + "UPDATE job SET int_frame_count=?, int_layer_count=? WHERE pk_job=?", + jobTotals[0] + jobTotals[1], layers.size(), job.getJobId()); + + getJdbcTemplate().update( + "UPDATE show_stats SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", + jobTotals[0] + jobTotals[1], job.getShowId()); + + updateState(job, jobState); + } + + private static final String HAS_PENDING_FRAMES = "SELECT " + "int_waiting_count " + "FROM " + + "job," + "job_stat " + "WHERE " + "job.pk_job = job_stat.pk_job " + "AND " + + "job.str_state = 'PENDING' " + "AND " + "job.b_paused = false " + "AND " + + "job.b_auto_book = true " + "AND " + "job.pk_job = ?"; + + @Override + public boolean hasPendingFrames(JobInterface job) { + try { + return getJdbcTemplate().queryForObject(HAS_PENDING_FRAMES, Integer.class, + job.getJobId()) > 0; + } catch (DataAccessException e) { + return false; + } + } + + private static final String IS_JOB_OVER_MIN_CORES = "SELECT " + "COUNT(1) " + "FROM " + + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + "AND " + + "job_resource.int_cores > job_resource.int_min_cores"; + + @Override + public boolean isOverMinCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MIN_CORES, Integer.class, + job.getJobId()) > 0; + } + + private static final String IS_JOB_OVER_MAX_CORES = "SELECT " + "COUNT(1) " + "FROM " + + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + "AND " + + "job_resource.int_cores + ? > job_resource.int_max_cores"; + + @Override + public boolean isOverMaxCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, Integer.class, + job.getJobId(), 0) > 0; + } + + @Override + public boolean isOverMaxCores(JobInterface job, int coreUnits) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, Integer.class, + job.getJobId(), coreUnits) > 0; + } + + private static final String IS_JOB_AT_MAX_CORES = "SELECT " + "COUNT(1) " + "FROM " + + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + "AND " + + "job_resource.int_cores >= job_resource.int_max_cores "; + + @Override + public boolean isAtMaxCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_CORES, Integer.class, + job.getJobId()) > 0; + } + + private static final String IS_JOB_OVER_MAX_GPUS = "SELECT " + "COUNT(1) " + "FROM " + + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + "AND " + + "job_resource.int_gpus + ? 
> job_resource.int_max_gpus"; + + @Override + public boolean isOverMaxGpus(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, Integer.class, job.getJobId(), + 0) > 0; + } + + @Override + public boolean isOverMaxGpus(JobInterface job, int gpu) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, Integer.class, job.getJobId(), + gpu) > 0; + } + + private static final String IS_JOB_AT_MAX_GPUS = "SELECT " + "COUNT(1) " + "FROM " + + "job_resource " + "WHERE " + "job_resource.pk_job = ? " + "AND " + + "job_resource.int_gpus >= job_resource.int_max_gpus "; + + @Override + public boolean isAtMaxGpus(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_GPUS, Integer.class, + job.getJobId()) > 0; + } + + @Override + public void updateMaxFrameRetries(JobInterface j, int max_retries) { + if (max_retries < 0) { + throw new IllegalArgumentException("max retries must be greater than 0"); + } + + int max_max_retries = getJdbcTemplate().queryForObject( + "SELECT int_value FROM config WHERE str_key=?", Integer.class, "MAX_FRAME_RETRIES"); + + if (max_retries > max_max_retries) { + throw new IllegalArgumentException("max retries must be less than " + max_max_retries); + } + + getJdbcTemplate().update("UPDATE job SET int_max_retries=? WHERE pk_job=?", max_retries, + j.getJobId()); + } + + private static final String GET_FRAME_STATE_TOTALS = + "SELECT " + "job.int_frame_count," + "job_stat.* " + "FROM " + "job," + "job_stat " + + "WHERE " + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job=?"; + + public FrameStateTotals getFrameStateTotals(JobInterface job) { + return getJdbcTemplate().queryForObject(GET_FRAME_STATE_TOTALS, + new RowMapper() { + public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameStateTotals t = new FrameStateTotals(); + t.dead = rs.getInt("int_dead_count"); + t.depend = rs.getInt("int_depend_count"); + t.eaten = rs.getInt("int_eaten_count"); + t.running = rs.getInt("int_running_count"); + t.succeeded = rs.getInt("int_succeeded_count"); + t.waiting = rs.getInt("int_waiting_count"); + t.total = rs.getInt("int_frame_count"); + return t; + } + }, job.getJobId()); + } + + private static final String GET_EXECUTION_SUMMARY = + "SELECT " + "job_usage.int_core_time_success," + "job_usage.int_core_time_fail," + + "job_usage.int_gpu_time_success," + "job_usage.int_gpu_time_fail," + + "job_mem.int_max_rss " + "FROM " + "job," + "job_usage, " + "job_mem " + + "WHERE " + "job.pk_job = job_usage.pk_job " + "AND " + + "job.pk_job = job_mem.pk_job " + "AND " + "job.pk_job = ?"; + + public ExecutionSummary getExecutionSummary(JobInterface job) { + return getJdbcTemplate().queryForObject(GET_EXECUTION_SUMMARY, + new RowMapper() { + public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { + ExecutionSummary e = new ExecutionSummary(); + e.coreTimeSuccess = rs.getLong("int_core_time_success"); + e.coreTimeFail = rs.getLong("int_core_time_fail"); + e.coreTime = e.coreTimeSuccess + e.coreTimeFail; + e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); + e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); + e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; + e.highMemoryKb = rs.getLong("int_max_rss"); + + return e; + } + }, job.getJobId()); + } + + private static final String INSERT_JOB_ENV = "INSERT INTO " + "job_env " + "(" + + "pk_job_env, pk_job, str_key, str_value " + ") " + "VALUES (?,?,?,?)"; + + @Override + public void insertEnvironment(JobInterface job, Map env) { + for (Map.Entry 
e : env.entrySet()) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_JOB_ENV, pk, job.getJobId(), e.getKey(), e.getValue()); + } + } + + @Override + public void insertEnvironment(JobInterface job, String key, String value) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_JOB_ENV, pk, job.getJobId(), key, value); + } + + @Override + public Map getEnvironment(JobInterface job) { + Map result = new HashMap(); + + List> _result = getJdbcTemplate().queryForList( + "SELECT str_key, str_value FROM job_env WHERE pk_job=?", job.getJobId()); + + for (Map o : _result) { + result.put((String) o.get("str_key"), (String) o.get("str_value")); + } + + return result; + } + + @Override + public void updateParent(JobInterface job, GroupDetail dest) { + updateParent(job, dest, new Inherit[] {Inherit.All}); + } + + @Override + public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) { + + if (!job.getShowId().equals(dest.getShowId())) { + throw new EntityModificationError( + "error moving job, " + "cannot move jobs between shows"); + } + + StringBuilder query = new StringBuilder(1024); + query.append("UPDATE job_resource SET "); + List values = new ArrayList(); + + Set inheritSet = new HashSet(inherits.length); + inheritSet.addAll(Arrays.asList(inherits)); + + for (Inherit i : inheritSet) { + switch (i) { + case Priority: + if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { + query.append("int_priority=?,"); + values.add(dest.jobPriority); + } + break; + + case MinCores: + if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { + query.append("int_min_cores=?,"); + values.add(dest.jobMinCores); + } + break; + + case MaxCores: + if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { + query.append("int_max_cores=?,"); + values.add(dest.jobMaxCores); + } + break; + + case MinGpus: + if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_min_gpus=?,"); + values.add(dest.jobMinGpus); + } + break; + + case MaxGpus: + if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_max_gpus=?,"); + values.add(dest.jobMaxGpus); + } + break; + + case All: + if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { + query.append("int_priority=?,"); + values.add(dest.jobPriority); + } + + if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { + query.append("int_min_cores=?,"); + values.add(dest.jobMinCores); + } + + if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { + query.append("int_max_cores=?,"); + values.add(dest.jobMaxCores); + } + + if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_min_gpus=?,"); + values.add(dest.jobMinGpus); + } + + if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_max_gpus=?,"); + values.add(dest.jobMaxGpus); + } + break; + } + } + + getJdbcTemplate().update("UPDATE job SET pk_folder=?, pk_dept=? WHERE pk_job=?", + dest.getGroupId(), dest.getDepartmentId(), job.getJobId()); + + getJdbcTemplate().update("UPDATE job_history SET pk_dept=? 
WHERE pk_job=?", + dest.getDepartmentId(), job.getJobId()); + + if (values.size() > 0) { + query.deleteCharAt(query.length() - 1); + query.append(" WHERE pk_job=?"); + values.add(job.getJobId()); + getJdbcTemplate().update(query.toString(), values.toArray()); + } + } + + private static final String HAS_PENDING_JOBS = "SELECT " + "job.pk_job " + "FROM " + "job, " + + "job_stat, " + "job_resource " + "WHERE " + "job.pk_job = job_stat.pk_job " + "AND " + + "job.pk_job = job_resource.pk_job " + "AND " + "job.str_state = 'PENDING' " + "AND " + + "job.b_paused = false " + "AND " + "job.b_auto_book = true " + "AND " + + "job_stat.int_waiting_count != 0 " + "AND " + + "job_resource.int_cores < job_resource.int_max_cores " + "AND " + + "job_resource.int_gpus < job_resource.int_max_gpus " + "AND " + "job.pk_facility = ? " + + "LIMIT 1"; + + @Override + public boolean cueHasPendingJobs(FacilityInterface f) { + return getJdbcTemplate().queryForList(HAS_PENDING_JOBS, f.getFacilityId()).size() > 0; + } + + @Override + public void enableAutoBooking(JobInterface job, boolean value) { + getJdbcTemplate().update("UPDATE job SET b_auto_book=? WHERE pk_job=?", value, + job.getJobId()); + } + + @Override + public void enableAutoUnBooking(JobInterface job, boolean value) { + getJdbcTemplate().update("UPDATE job SET b_auto_unbook=? WHERE pk_job=?", value, + job.getJobId()); + } + + public static final String MAP_POST_JOB = + "INSERT INTO " + "job_post " + "(pk_job_post, pk_job, pk_post_job) " + "VALUES (?,?,?)"; + + @Override + public void mapPostJob(BuildableJob job) { + getJdbcTemplate().update(MAP_POST_JOB, SqlUtil.genKeyRandom(), job.detail.id, + job.getPostJob().detail.id); + } + + public static final String ACTIVATE_POST_JOB = "UPDATE " + "job " + "SET " + "str_state=? " + + "WHERE " + "pk_job IN (SELECT pk_post_job FROM job_post WHERE pk_job = ?)"; + + @Override + public void activatePostJob(JobInterface job) { + getJdbcTemplate().update(ACTIVATE_POST_JOB, JobState.PENDING.toString(), job.getJobId()); + getJdbcTemplate().update("DELETE FROM job_post WHERE pk_job=?", job.getJobId()); + } + + @Override + public void updateDepartment(GroupInterface group, DepartmentInterface dept) { + getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_folder=?", + dept.getDepartmentId(), group.getGroupId()); + } + + @Override + public void updateDepartment(JobInterface job, DepartmentInterface dept) { + getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_job=?", dept.getDepartmentId(), + job.getJobId()); + } + + public void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus) { + + if (exitStatus == 0) { + + getJdbcTemplate().update( + "UPDATE " + "job_usage " + "SET " + + "int_core_time_success = int_core_time_success + ?," + + "int_gpu_time_success = int_gpu_time_success + ?," + + "int_clock_time_success = int_clock_time_success + ?," + + "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + + "pk_job = ? ", + usage.getCoreTimeSeconds(), usage.getGpuTimeSeconds(), + usage.getClockTimeSeconds(), job.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "job_usage " + "SET " + "int_clock_time_high = ? " + "WHERE " + + "pk_job = ? 
" + "AND " + "int_clock_time_high < ?", + usage.getClockTimeSeconds(), job.getJobId(), usage.getClockTimeSeconds()); + } else { + + getJdbcTemplate().update("UPDATE " + "job_usage " + "SET " + + "int_core_time_fail = int_core_time_fail + ?," + + "int_clock_time_fail = int_clock_time_fail + ?," + + "int_frame_fail_count = int_frame_fail_count + 1 " + "WHERE " + "pk_job = ? ", + usage.getCoreTimeSeconds(), usage.getClockTimeSeconds(), job.getJobId()); + } + } + + public void updateEmail(JobInterface job, String email) { + getJdbcTemplate().update("UPDATE job SET str_email=? WHERE pk_job=?", email, + job.getJobId()); + } + + public String getEmail(JobInterface job) { + String jobId = job.getJobId(); + return getJdbcTemplate().queryForObject("SELECT str_email FROM job WHERE pk_job = ?", + String.class, jobId); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java index eab61fcdf..174128ac9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java @@ -52,606 +52,627 @@ import org.apache.logging.log4j.LogManager; public class LayerDaoJdbc extends JdbcDaoSupport implements LayerDao { - private final long MEM_RESERVED_MIN; - private static final Logger logger = LogManager.getLogger(LayerDaoJdbc.class); - private static final String INSERT_OUTPUT_PATH = "INSERT INTO " + "layer_output " + "( " - + "pk_layer_output," + "pk_layer," + "pk_job," + "str_filespec " + ") VALUES (?,?,?,?)"; - - @Autowired - public LayerDaoJdbc(Environment env) { - this.MEM_RESERVED_MIN = - env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - } - - @Override - public void insertLayerOutput(LayerInterface layer, String filespec) { - getJdbcTemplate().update(INSERT_OUTPUT_PATH, UUID.randomUUID().toString(), layer.getLayerId(), - layer.getJobId(), filespec); - } - - private static final String GET_OUTPUT = "SELECT " + "str_filespec " + "FROM " + "layer_output " - + "WHERE " + "pk_layer = ? 
" + "ORDER BY " + "ser_order"; - - private static final RowMapper OUTPUT_MAPPER = new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_filespec"); - } - }; - - @Override - public List getLayerOutputs(LayerInterface layer) { - return getJdbcTemplate().query(GET_OUTPUT, OUTPUT_MAPPER, layer.getLayerId()); - } - - private static final String IS_LAYER_DISPATCHABLE = - "SELECT " + "int_waiting_count " + "FROM " + "layer_stat " + "WHERE " + "pk_layer=?"; - - @Override - public boolean isLayerDispatchable(LayerInterface l) { - return getJdbcTemplate().queryForObject(IS_LAYER_DISPATCHABLE, Integer.class, - l.getLayerId()) > 0; - } - - private static final String IS_LAYER_COMPLETE = - "SELECT " + "SUM ( " + "int_waiting_count + " + "int_running_count + " + "int_dead_count + " - + "int_depend_count " + ") " + "FROM " + "layer_stat " + "WHERE " + "pk_layer=?"; - - public boolean isLayerComplete(LayerInterface l) { - if (isLaunching(l)) { - return false; - } - return getJdbcTemplate().queryForObject(IS_LAYER_COMPLETE, Integer.class, l.getLayerId()) == 0; - } - - private static final String IS_LAUNCHING = - "SELECT " + "str_state " + "FROM " + "job " + "WHERE " + "pk_job=?"; - - @Override - public boolean isLaunching(LayerInterface l) { - return getJdbcTemplate().queryForObject(IS_LAUNCHING, String.class, l.getJobId()) - .equals(JobState.STARTUP.toString()); - } - - private static final String IS_THREADABLE = - "SELECT " + "b_threadable " + "FROM " + "layer " + "WHERE " + "pk_layer = ?"; - - @Override - public boolean isThreadable(LayerInterface l) { - return getJdbcTemplate().queryForObject(IS_THREADABLE, Boolean.class, l.getLayerId()); - } - - /** - * Query for layers table. Where clauses are appended later - */ - public static final String GET_LAYER_DETAIL = "SELECT " + "layer.*, " + "job.pk_show, " - + "job.pk_facility " + "FROM " + "layer," + "job," + "show " + "WHERE " - + "layer.pk_job = job.pk_job " + "AND " + "job.pk_show = show.pk_show "; - - private static final String GET_LAYER = "SELECT " + "layer.pk_layer," + "layer.pk_job," - + "job.pk_show," + "job.pk_facility, " + "layer.str_name " + "FROM " + "layer," + "job," - + "show " + "WHERE " + "layer.pk_job = job.pk_job " + "AND " + "job.pk_show = show.pk_show "; - - /** - * Maps a ResultSet to a LayerDetail - */ - public static final RowMapper LAYER_DETAIL_MAPPER = new RowMapper() { - public LayerDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - LayerDetail layer = new LayerDetail(); - layer.chunkSize = rs.getInt("int_chunk_size"); - layer.command = rs.getString("str_cmd"); - layer.dispatchOrder = rs.getInt("int_dispatch_order"); - layer.id = rs.getString("pk_layer"); - layer.jobId = rs.getString("pk_job"); - layer.showId = rs.getString("pk_show"); - layer.facilityId = rs.getString("pk_facility"); - layer.name = rs.getString("str_name"); - layer.range = rs.getString("str_range"); - layer.minimumCores = rs.getInt("int_cores_min"); - layer.minimumMemory = rs.getLong("int_mem_min"); - layer.minimumGpus = rs.getInt("int_gpus_min"); - layer.minimumGpuMemory = rs.getLong("int_gpu_mem_min"); - layer.type = LayerType.valueOf(rs.getString("str_type")); - layer.tags = Sets.newHashSet(rs.getString("str_tags").replaceAll(" ", "").split("\\|")); - layer.services.addAll(Lists.newArrayList(rs.getString("str_services").split(","))); - layer.timeout = rs.getInt("int_timeout"); - layer.timeout_llu = rs.getInt("int_timeout_llu"); - return layer; - } - }; - - /** - * Maps a ResultSet to 
a LayerDetail - */ - private static final RowMapper LAYER_MAPPER = new RowMapper() { - public LayerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LayerEntity layer = new LayerEntity(); - layer.id = rs.getString("pk_layer"); - layer.jobId = rs.getString("pk_job"); - layer.showId = rs.getString("pk_show"); - layer.facilityId = rs.getString("pk_facility"); - layer.name = rs.getString("str_name"); - return layer; - } - }; - - @Override - public LayerDetail getLayerDetail(String id) { - LayerDetail layerDetail = getJdbcTemplate() - .queryForObject(GET_LAYER_DETAIL + " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, id); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerDetail getLayerDetail(LayerInterface layer) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject( - GET_LAYER_DETAIL + " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, layer.getLayerId()); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerDetail findLayerDetail(JobInterface job, String name) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject( - GET_LAYER_DETAIL + " AND layer.pk_job=? AND layer.str_name=?", LAYER_DETAIL_MAPPER, - job.getJobId(), name); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerInterface findLayer(JobInterface job, String name) { - try { - return getJdbcTemplate().queryForObject( - GET_LAYER + " AND layer.pk_job=? AND layer.str_name=?", LAYER_MAPPER, job.getJobId(), - name); - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException( - "The layer " + name + " was not found in " + job.getName() + e, 0); - } - } - - @Override - public List getLayerDetails(JobInterface job) { - List layers = getJdbcTemplate().query(GET_LAYER_DETAIL + " AND layer.pk_job=?", - LAYER_DETAIL_MAPPER, job.getJobId()); - layers.stream().forEach(layerDetail -> layerDetail.limits.addAll(getLimitNames(layerDetail))); - return layers; - } - - @Override - public List getLayers(JobInterface job) { - return getJdbcTemplate().query(GET_LAYER + " AND layer.pk_job=?", LAYER_MAPPER, job.getJobId()); - } - - @Override - public LayerInterface getLayer(String id) { - return getJdbcTemplate().queryForObject(GET_LAYER + " AND layer.pk_layer=?", LAYER_MAPPER, id); - } - - private static final String INSERT_LAYER = "INSERT INTO " + "layer " + "(" + "pk_layer, " - + "pk_job, " + "str_name, " + "str_cmd, " + "str_range, " + "int_chunk_size, " - + "int_dispatch_order, " + "str_tags, " + "str_type," + "int_cores_min, " + "int_cores_max, " - + "b_threadable, " + "int_mem_min, " + "int_gpus_min, " + "int_gpus_max, " - + "int_gpu_mem_min, " + "str_services, " + "int_timeout," + "int_timeout_llu " + ") " - + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertLayerDetail(LayerDetail l) { - l.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER, l.id, l.jobId, l.name, l.command, l.range, l.chunkSize, - l.dispatchOrder, StringUtils.join(l.tags, " | "), l.type.toString(), l.minimumCores, - l.maximumCores, l.isThreadable, l.minimumMemory, l.minimumGpus, l.maximumGpus, - l.minimumGpuMemory, StringUtils.join(l.services, ","), l.timeout, l.timeout_llu); - } - - @Override - public void updateLayerMinMemory(LayerInterface layer, long val) { - if (val < MEM_RESERVED_MIN) { - val = MEM_RESERVED_MIN; - } - getJdbcTemplate().update("UPDATE layer SET 
int_mem_min=? WHERE pk_layer=?", val, - layer.getLayerId()); - } - - @Override - public void updateLayerMinGpuMemory(LayerInterface layer, long kb) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=?", kb, - layer.getLayerId()); - } - - private static final String BALANCE_MEM = "UPDATE " + "layer " + "SET " + "int_mem_min = ? " - + "WHERE " + "pk_layer = ? " + "AND " + "int_mem_min > ? " + "AND " + "b_optimize = true"; - - @Override - public boolean balanceLayerMinMemory(LayerInterface layer, long frameMaxRss) { + private final long MEM_RESERVED_MIN; + private static final Logger logger = LogManager.getLogger(LayerDaoJdbc.class); + private static final String INSERT_OUTPUT_PATH = "INSERT INTO " + "layer_output " + "( " + + "pk_layer_output," + "pk_layer," + "pk_job," + "str_filespec " + ") VALUES (?,?,?,?)"; + + @Autowired + public LayerDaoJdbc(Environment env) { + this.MEM_RESERVED_MIN = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + } + + @Override + public void insertLayerOutput(LayerInterface layer, String filespec) { + getJdbcTemplate().update(INSERT_OUTPUT_PATH, UUID.randomUUID().toString(), + layer.getLayerId(), layer.getJobId(), filespec); + } + + private static final String GET_OUTPUT = "SELECT " + "str_filespec " + "FROM " + "layer_output " + + "WHERE " + "pk_layer = ? " + "ORDER BY " + "ser_order"; + + private static final RowMapper OUTPUT_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("str_filespec"); + } + }; + + @Override + public List getLayerOutputs(LayerInterface layer) { + return getJdbcTemplate().query(GET_OUTPUT, OUTPUT_MAPPER, layer.getLayerId()); + } + + private static final String IS_LAYER_DISPATCHABLE = + "SELECT " + "int_waiting_count " + "FROM " + "layer_stat " + "WHERE " + "pk_layer=?"; + + @Override + public boolean isLayerDispatchable(LayerInterface l) { + return getJdbcTemplate().queryForObject(IS_LAYER_DISPATCHABLE, Integer.class, + l.getLayerId()) > 0; + } + + private static final String IS_LAYER_COMPLETE = "SELECT " + "SUM ( " + "int_waiting_count + " + + "int_running_count + " + "int_dead_count + " + "int_depend_count " + ") " + "FROM " + + "layer_stat " + "WHERE " + "pk_layer=?"; + + public boolean isLayerComplete(LayerInterface l) { + if (isLaunching(l)) { + return false; + } + return getJdbcTemplate().queryForObject(IS_LAYER_COMPLETE, Integer.class, + l.getLayerId()) == 0; + } + + private static final String IS_LAUNCHING = + "SELECT " + "str_state " + "FROM " + "job " + "WHERE " + "pk_job=?"; + + @Override + public boolean isLaunching(LayerInterface l) { + return getJdbcTemplate().queryForObject(IS_LAUNCHING, String.class, l.getJobId()) + .equals(JobState.STARTUP.toString()); + } + + private static final String IS_THREADABLE = + "SELECT " + "b_threadable " + "FROM " + "layer " + "WHERE " + "pk_layer = ?"; + + @Override + public boolean isThreadable(LayerInterface l) { + return getJdbcTemplate().queryForObject(IS_THREADABLE, Boolean.class, l.getLayerId()); + } /** - * Lowers the memory value on the frame when the maxrss is lower than the memory requirement. + * Query for layers table. 
Where clauses are appended later */ - long maxrss = getJdbcTemplate().queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getLayerId()); - - if (maxrss < frameMaxRss) { - maxrss = frameMaxRss; - } - if (maxrss < MEM_RESERVED_MIN) { - maxrss = MEM_RESERVED_MIN; - } else { - maxrss = maxrss + CueUtil.MB256; - } - - boolean result = getJdbcTemplate().update(BALANCE_MEM, maxrss, layer.getLayerId(), maxrss) == 1; - if (result) { - logger.info(layer.getName() + " was balanced to " + maxrss); - } - return result; - } - - @Override - public void increaseLayerMinMemory(LayerInterface layer, long val) { - getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_layer=? AND int_mem_min < ?", - val, layer.getLayerId(), val); - } - - @Override - public void increaseLayerMinGpuMemory(LayerInterface layer, long kb) { - getJdbcTemplate().update( - "UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=? AND int_gpu_mem_min < ?", kb, - layer.getLayerId(), kb); - } - - @Override - public void updateLayerMinCores(LayerInterface layer, int val) { - if (val < Dispatcher.CORE_POINTS_RESERVED_MIN) { - val = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; - } - getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_layer=?", val, - layer.getLayerId()); - } - - @Override - public void updateLayerMaxCores(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_layer=?", val, - layer.getLayerId()); - } - - @Override - public void updateLayerMinGpus(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_layer=?", val, - layer.getLayerId()); - } - - @Override - public void updateLayerMaxGpus(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_gpus_max=? WHERE pk_layer=?", val, - layer.getLayerId()); - } - - private static final String UPDATE_LAYER_MAX_RSS = - "UPDATE " + "layer_mem " + "SET " + "int_max_rss = ? " + "WHERE " + "pk_layer = ?"; - - @Override - public void updateLayerMaxRSS(LayerInterface layer, long val, boolean force) { - StringBuilder sb = new StringBuilder(UPDATE_LAYER_MAX_RSS); - Object[] options; - if (!force) { - options = new Object[] {val, layer.getLayerId(), val}; - sb.append(" AND int_max_rss < ?"); - } else { - options = new Object[] {val, layer.getLayerId()}; - } - getJdbcTemplate().update(sb.toString(), options); - } - - @Override - public void updateLayerTags(LayerInterface layer, Set tags) { - if (tags.size() == 0) { - throw new IllegalArgumentException("Layers must have at least one tag."); - } - StringBuilder sb = new StringBuilder(128); - for (String t : tags) { - if (t == null) { - continue; - } - if (t.length() < 1) { - continue; - } - sb.append(t + " | "); - } - sb.delete(sb.length() - 3, sb.length()); - if (sb.length() == 0) { - throw new IllegalArgumentException( - "Invalid layer tags, cannot contain null tags or " + "tags of zero length."); - } - getJdbcTemplate().update("UPDATE layer SET str_tags=? WHERE pk_layer=?", sb.toString(), - layer.getLayerId()); - } - - @Override - public void appendLayerTags(LayerInterface layer, String val) { - String appendTag = " | " + val; - String matchTag = "%" + val + "%"; - - getJdbcTemplate().update( - "UPDATE layer SET str_tags = str_tags || ? " + "WHERE pk_layer=? 
AND str_tags NOT LIKE ?", - appendTag, layer.getLayerId(), matchTag); - } - - public FrameStateTotals getFrameStateTotals(LayerInterface layer) { - return getJdbcTemplate().queryForObject("SELECT * FROM layer_stat WHERE pk_layer=?", - new RowMapper() { - public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameStateTotals t = new FrameStateTotals(); - t.dead = rs.getInt("int_dead_count"); - t.depend = rs.getInt("int_depend_count"); - t.eaten = rs.getInt("int_eaten_count"); - t.running = rs.getInt("int_running_count"); - t.succeeded = rs.getInt("int_succeeded_count"); - t.waiting = rs.getInt("int_waiting_count"); - t.total = rs.getInt("int_total_count"); - return t; - } - }, layer.getLayerId()); - } - - private static final String GET_EXECUTION_SUMMARY = - "SELECT " + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail," - + "layer_usage.int_gpu_time_success," + "layer_usage.int_gpu_time_fail," - + "layer_usage.int_clock_time_success," + "layer_mem.int_max_rss " + "FROM " + "layer," - + "layer_usage, " + "layer_mem " + "WHERE " + "layer.pk_layer = layer_usage.pk_layer " - + "AND " + "layer.pk_layer = layer_mem.pk_layer " + "AND " + "layer.pk_layer = ?"; - - @Override - public ExecutionSummary getExecutionSummary(LayerInterface layer) { - return getJdbcTemplate().queryForObject(GET_EXECUTION_SUMMARY, - new RowMapper() { - public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { - ExecutionSummary e = new ExecutionSummary(); - e.coreTimeSuccess = rs.getLong("int_core_time_success"); - e.coreTimeFail = rs.getLong("int_core_time_fail"); - e.coreTime = e.coreTimeSuccess + e.coreTimeFail; - e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); - e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); - e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; - e.highMemoryKb = rs.getLong("int_max_rss"); - return e; - } + public static final String GET_LAYER_DETAIL = "SELECT " + "layer.*, " + "job.pk_show, " + + "job.pk_facility " + "FROM " + "layer," + "job," + "show " + "WHERE " + + "layer.pk_job = job.pk_job " + "AND " + "job.pk_show = show.pk_show "; + + private static final String GET_LAYER = + "SELECT " + "layer.pk_layer," + "layer.pk_job," + "job.pk_show," + "job.pk_facility, " + + "layer.str_name " + "FROM " + "layer," + "job," + "show " + "WHERE " + + "layer.pk_job = job.pk_job " + "AND " + "job.pk_show = show.pk_show "; + + /** + * Maps a ResultSet to a LayerDetail + */ + public static final RowMapper LAYER_DETAIL_MAPPER = new RowMapper() { + public LayerDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + LayerDetail layer = new LayerDetail(); + layer.chunkSize = rs.getInt("int_chunk_size"); + layer.command = rs.getString("str_cmd"); + layer.dispatchOrder = rs.getInt("int_dispatch_order"); + layer.id = rs.getString("pk_layer"); + layer.jobId = rs.getString("pk_job"); + layer.showId = rs.getString("pk_show"); + layer.facilityId = rs.getString("pk_facility"); + layer.name = rs.getString("str_name"); + layer.range = rs.getString("str_range"); + layer.minimumCores = rs.getInt("int_cores_min"); + layer.minimumMemory = rs.getLong("int_mem_min"); + layer.minimumGpus = rs.getInt("int_gpus_min"); + layer.minimumGpuMemory = rs.getLong("int_gpu_mem_min"); + layer.type = LayerType.valueOf(rs.getString("str_type")); + layer.tags = Sets.newHashSet(rs.getString("str_tags").replaceAll(" ", "").split("\\|")); + layer.services.addAll(Lists.newArrayList(rs.getString("str_services").split(","))); + layer.timeout = rs.getInt("int_timeout"); + 
layer.timeout_llu = rs.getInt("int_timeout_llu"); + return layer; + } + }; + + /** + * Maps a ResultSet to a LayerDetail + */ + private static final RowMapper LAYER_MAPPER = new RowMapper() { + public LayerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + LayerEntity layer = new LayerEntity(); + layer.id = rs.getString("pk_layer"); + layer.jobId = rs.getString("pk_job"); + layer.showId = rs.getString("pk_show"); + layer.facilityId = rs.getString("pk_facility"); + layer.name = rs.getString("str_name"); + return layer; + } + }; + + @Override + public LayerDetail getLayerDetail(String id) { + LayerDetail layerDetail = getJdbcTemplate().queryForObject( + GET_LAYER_DETAIL + " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, id); + layerDetail.limits.addAll(getLimitNames(layerDetail)); + return layerDetail; + } + + @Override + public LayerDetail getLayerDetail(LayerInterface layer) { + LayerDetail layerDetail = + getJdbcTemplate().queryForObject(GET_LAYER_DETAIL + " AND layer.pk_layer=?", + LAYER_DETAIL_MAPPER, layer.getLayerId()); + layerDetail.limits.addAll(getLimitNames(layerDetail)); + return layerDetail; + } + + @Override + public LayerDetail findLayerDetail(JobInterface job, String name) { + LayerDetail layerDetail = getJdbcTemplate().queryForObject( + GET_LAYER_DETAIL + " AND layer.pk_job=? AND layer.str_name=?", LAYER_DETAIL_MAPPER, + job.getJobId(), name); + layerDetail.limits.addAll(getLimitNames(layerDetail)); + return layerDetail; + } + + @Override + public LayerInterface findLayer(JobInterface job, String name) { + try { + return getJdbcTemplate().queryForObject( + GET_LAYER + " AND layer.pk_job=? AND layer.str_name=?", LAYER_MAPPER, + job.getJobId(), name); + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + throw new EmptyResultDataAccessException( + "The layer " + name + " was not found in " + job.getName() + e, 0); + } + } + + @Override + public List getLayerDetails(JobInterface job) { + List layers = getJdbcTemplate().query(GET_LAYER_DETAIL + " AND layer.pk_job=?", + LAYER_DETAIL_MAPPER, job.getJobId()); + layers.stream() + .forEach(layerDetail -> layerDetail.limits.addAll(getLimitNames(layerDetail))); + return layers; + } + + @Override + public List getLayers(JobInterface job) { + return getJdbcTemplate().query(GET_LAYER + " AND layer.pk_job=?", LAYER_MAPPER, + job.getJobId()); + } + + @Override + public LayerInterface getLayer(String id) { + return getJdbcTemplate().queryForObject(GET_LAYER + " AND layer.pk_layer=?", LAYER_MAPPER, + id); + } + + private static final String INSERT_LAYER = "INSERT INTO " + "layer " + "(" + "pk_layer, " + + "pk_job, " + "str_name, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + + "int_dispatch_order, " + "str_tags, " + "str_type," + "int_cores_min, " + + "int_cores_max, " + "b_threadable, " + "int_mem_min, " + "int_gpus_min, " + + "int_gpus_max, " + "int_gpu_mem_min, " + "str_services, " + "int_timeout," + + "int_timeout_llu " + ") " + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insertLayerDetail(LayerDetail l) { + l.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_LAYER, l.id, l.jobId, l.name, l.command, l.range, + l.chunkSize, l.dispatchOrder, StringUtils.join(l.tags, " | "), l.type.toString(), + l.minimumCores, l.maximumCores, l.isThreadable, l.minimumMemory, l.minimumGpus, + l.maximumGpus, l.minimumGpuMemory, StringUtils.join(l.services, ","), l.timeout, + l.timeout_llu); + } + + @Override + public void updateLayerMinMemory(LayerInterface layer, long val) { 
+ if (val < MEM_RESERVED_MIN) { + val = MEM_RESERVED_MIN; + } + getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMinGpuMemory(LayerInterface layer, long kb) { + getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=?", kb, + layer.getLayerId()); + } + + private static final String BALANCE_MEM = + "UPDATE " + "layer " + "SET " + "int_mem_min = ? " + "WHERE " + "pk_layer = ? " + "AND " + + "int_mem_min > ? " + "AND " + "b_optimize = true"; + + @Override + public boolean balanceLayerMinMemory(LayerInterface layer, long frameMaxRss) { + + /** + * Lowers the memory value on the frame when the maxrss is lower than the memory + * requirement. + */ + long maxrss = getJdbcTemplate().queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, + layer.getLayerId()); + + if (maxrss < frameMaxRss) { + maxrss = frameMaxRss; + } + if (maxrss < MEM_RESERVED_MIN) { + maxrss = MEM_RESERVED_MIN; + } else { + maxrss = maxrss + CueUtil.MB256; + } + + boolean result = + getJdbcTemplate().update(BALANCE_MEM, maxrss, layer.getLayerId(), maxrss) == 1; + if (result) { + logger.info(layer.getName() + " was balanced to " + maxrss); + } + return result; + } + + @Override + public void increaseLayerMinMemory(LayerInterface layer, long val) { + getJdbcTemplate().update( + "UPDATE layer SET int_mem_min=? WHERE pk_layer=? AND int_mem_min < ?", val, + layer.getLayerId(), val); + } + + @Override + public void increaseLayerMinGpuMemory(LayerInterface layer, long kb) { + getJdbcTemplate().update( + "UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=? AND int_gpu_mem_min < ?", kb, + layer.getLayerId(), kb); + } + + @Override + public void updateLayerMinCores(LayerInterface layer, int val) { + if (val < Dispatcher.CORE_POINTS_RESERVED_MIN) { + val = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + } + getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMaxCores(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMinGpus(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + @Override + public void updateLayerMaxGpus(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_max=? WHERE pk_layer=?", val, + layer.getLayerId()); + } + + private static final String UPDATE_LAYER_MAX_RSS = + "UPDATE " + "layer_mem " + "SET " + "int_max_rss = ? 
" + "WHERE " + "pk_layer = ?"; + + @Override + public void updateLayerMaxRSS(LayerInterface layer, long val, boolean force) { + StringBuilder sb = new StringBuilder(UPDATE_LAYER_MAX_RSS); + Object[] options; + if (!force) { + options = new Object[] {val, layer.getLayerId(), val}; + sb.append(" AND int_max_rss < ?"); + } else { + options = new Object[] {val, layer.getLayerId()}; + } + getJdbcTemplate().update(sb.toString(), options); + } + + @Override + public void updateLayerTags(LayerInterface layer, Set tags) { + if (tags.size() == 0) { + throw new IllegalArgumentException("Layers must have at least one tag."); + } + StringBuilder sb = new StringBuilder(128); + for (String t : tags) { + if (t == null) { + continue; + } + if (t.length() < 1) { + continue; + } + sb.append(t + " | "); + } + sb.delete(sb.length() - 3, sb.length()); + if (sb.length() == 0) { + throw new IllegalArgumentException( + "Invalid layer tags, cannot contain null tags or " + "tags of zero length."); + } + getJdbcTemplate().update("UPDATE layer SET str_tags=? WHERE pk_layer=?", sb.toString(), + layer.getLayerId()); + } + + @Override + public void appendLayerTags(LayerInterface layer, String val) { + String appendTag = " | " + val; + String matchTag = "%" + val + "%"; + + getJdbcTemplate().update( + "UPDATE layer SET str_tags = str_tags || ? " + + "WHERE pk_layer=? AND str_tags NOT LIKE ?", + appendTag, layer.getLayerId(), matchTag); + } + + public FrameStateTotals getFrameStateTotals(LayerInterface layer) { + return getJdbcTemplate().queryForObject("SELECT * FROM layer_stat WHERE pk_layer=?", + new RowMapper() { + public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { + FrameStateTotals t = new FrameStateTotals(); + t.dead = rs.getInt("int_dead_count"); + t.depend = rs.getInt("int_depend_count"); + t.eaten = rs.getInt("int_eaten_count"); + t.running = rs.getInt("int_running_count"); + t.succeeded = rs.getInt("int_succeeded_count"); + t.waiting = rs.getInt("int_waiting_count"); + t.total = rs.getInt("int_total_count"); + return t; + } + }, layer.getLayerId()); + } + + private static final String GET_EXECUTION_SUMMARY = "SELECT " + + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail," + + "layer_usage.int_gpu_time_success," + "layer_usage.int_gpu_time_fail," + + "layer_usage.int_clock_time_success," + "layer_mem.int_max_rss " + "FROM " + "layer," + + "layer_usage, " + "layer_mem " + "WHERE " + "layer.pk_layer = layer_usage.pk_layer " + + "AND " + "layer.pk_layer = layer_mem.pk_layer " + "AND " + "layer.pk_layer = ?"; + + @Override + public ExecutionSummary getExecutionSummary(LayerInterface layer) { + return getJdbcTemplate().queryForObject(GET_EXECUTION_SUMMARY, + new RowMapper() { + public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { + ExecutionSummary e = new ExecutionSummary(); + e.coreTimeSuccess = rs.getLong("int_core_time_success"); + e.coreTimeFail = rs.getLong("int_core_time_fail"); + e.coreTime = e.coreTimeSuccess + e.coreTimeFail; + e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); + e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); + e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; + e.highMemoryKb = rs.getLong("int_max_rss"); + return e; + } + }, layer.getLayerId()); + } + + private static final String INSERT_LAYER_ENV = "INSERT INTO " + "layer_env " + "(" + + "pk_layer_env, pk_layer, pk_job, str_key, str_value " + ") " + "VALUES (?,?,?,?,?)"; + + @Override + public void insertLayerEnvironment(LayerInterface layer, Map env) { + for 
(Map.Entry e : env.entrySet()) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_LAYER_ENV, pk, layer.getLayerId(), layer.getJobId(), + e.getKey(), e.getValue()); + } + } + + @Override + public void insertLayerEnvironment(LayerInterface layer, String key, String value) { + String pk = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_LAYER_ENV, pk, layer.getLayerId(), layer.getJobId(), key, + value); + } + + @Override + public Map getLayerEnvironment(LayerInterface layer) { + Map result = new HashMap(); + List> _result = getJdbcTemplate().queryForList( + "SELECT str_key, str_value FROM layer_env WHERE pk_layer=?", layer.getLayerId()); + + for (Map o : _result) { + result.put((String) o.get("str_key"), (String) o.get("str_value")); + } + return result; + } + + private static final String FIND_PAST_MAX_RSS = + "SELECT " + "layer_mem.int_max_rss " + "FROM " + "layer, " + "layer_mem, " + + "layer_stat " + "WHERE " + "layer.pk_layer = layer_stat.pk_layer " + "AND " + + "layer.pk_layer = layer_mem.pk_layer " + "AND " + "layer.pk_job = ? " + "AND " + + "layer.str_name = ? " + "AND " + + "layer_stat.int_succeeded_count >= ceil(layer_stat.int_total_count * .5) "; + + @Override + public long findPastMaxRSS(JobInterface job, String name) { + try { + long maxRss = getJdbcTemplate().queryForObject(FIND_PAST_MAX_RSS, Long.class, + job.getJobId(), name); + if (maxRss >= MEM_RESERVED_MIN) { + return maxRss; + } else { + return MEM_RESERVED_MIN; + } + } catch (EmptyResultDataAccessException e) { + // Actually want to return 0 here, which means + // there is no past history. + return 0; + } + } + + @Override + public void updateTags(JobInterface job, String tags, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET str_tags=? WHERE pk_job=? AND str_type=?", tags, + job.getJobId(), type.toString()); + } + + @Override + public void updateMinMemory(JobInterface job, long mem, LayerType type) { + if (mem < MEM_RESERVED_MIN) { + mem = MEM_RESERVED_MIN; + } + getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_job=? AND str_type=?", + mem, job.getJobId(), type.toString()); + } + + @Override + public void updateMinGpuMemory(JobInterface job, long kb, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_job=? AND str_type=?", + kb, job.getJobId(), type.toString()); + } + + @Override + public void updateMinCores(JobInterface job, int cores, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_job=? AND str_type=?", + cores, job.getJobId(), type.toString()); + } + + @Override + public void updateMaxCores(JobInterface job, int cores, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_job=? AND str_type=?", + cores, job.getJobId(), type.toString()); + } + + @Override + public void updateMinGpus(JobInterface job, int gpus, LayerType type) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_job=? AND str_type=?", + gpus, job.getJobId(), type.toString()); + } + + @Override + public void updateThreadable(LayerInterface layer, boolean threadable) { + getJdbcTemplate().update("UPDATE layer SET b_threadable=? WHERE pk_layer=?", threadable, + layer.getLayerId()); + } + + @Override + public void updateTimeout(LayerInterface layer, int timeout) { + getJdbcTemplate().update("UPDATE layer SET int_timeout=? 
WHERE pk_layer=?", timeout,
+                layer.getLayerId());
+    }
+
+    @Override
+    public void updateTimeoutLLU(LayerInterface layer, int timeout_llu) {
+        getJdbcTemplate().update("UPDATE layer SET int_timeout_llu=? WHERE pk_layer=?", timeout_llu,
+                layer.getLayerId());
+    }
+
+    @Override
+    public void enableMemoryOptimizer(LayerInterface layer, boolean value) {
+        getJdbcTemplate().update("UPDATE layer SET b_optimize=? WHERE pk_layer=?", value,
+                layer.getLayerId());
+    }
+
+    private static final String IS_OPTIMIZABLE = "SELECT " + "COUNT(1) " + "FROM " + "layer, "
+            + "layer_stat, " + "layer_usage " + "WHERE " + "layer.pk_layer = layer_stat.pk_layer "
+            + "AND " + "layer.pk_layer = layer_usage.pk_layer " + "AND " + "layer.pk_layer = ? "
+            + "AND " + "layer.int_cores_min = 100 " + "AND " + "layer.int_gpus_min = 0 " + "AND "
+            + "str_tags LIKE '%general%' " + "AND " + "str_tags NOT LIKE '%util%' " + "AND "
+            + "layer_stat.int_succeeded_count >= ? " + "AND "
+            + "(layer_usage.int_core_time_success / layer_stat.int_succeeded_count) <= ?";
+
+    @Override
+    public boolean isOptimizable(LayerInterface l, int succeeded, float avg) {
+        if (succeeded < 1) {
+            throw new IllegalArgumentException(
+                    "Succeeded frames option " + "must be greater than zero");
+        }
+        return getJdbcTemplate().queryForObject(IS_OPTIMIZABLE, Integer.class, l.getLayerId(),
+                succeeded, avg) > 0;
+    }
+
+    private static final String THREAD_STATS =
+            "SELECT " + "avg(interval_to_seconds(ts_stopped - ts_started)) AS avg, " + "int_cores, "
+                    + "int_gpus " + "FROM " + "frame " + "WHERE " + "frame.pk_layer = ? " + "AND "
+                    + "frame.int_checkpoint_count = 0 " + "AND " + "int_cores > 0 " + "AND "
+                    + "int_gpus > 0 " + "GROUP BY " + "int_cores, " + "int_gpus " + "ORDER BY "
+                    + "int_cores DESC ";
+
+    @Override
+    public List<ThreadStats> getThreadStats(LayerInterface layer) {
+
+        return getJdbcTemplate().query(THREAD_STATS, new RowMapper<ThreadStats>() {
+            public ThreadStats mapRow(ResultSet rs, int rowNum) throws SQLException {
+                ThreadStats s = new ThreadStats();
+                s.setThreads(rs.getInt("int_cores") / 100);
+                s.setAvgFrameTime(rs.getInt("avg"));
+                return s;
+            }
         }, layer.getLayerId());
-    }
-
-    private static final String INSERT_LAYER_ENV = "INSERT INTO " + "layer_env " + "("
-        + "pk_layer_env, pk_layer, pk_job, str_key, str_value " + ") " + "VALUES (?,?,?,?,?)";
-
-    @Override
-    public void insertLayerEnvironment(LayerInterface layer, Map<String, String> env) {
-        for (Map.Entry<String, String> e : env.entrySet()) {
-            String pk = SqlUtil.genKeyRandom();
-            getJdbcTemplate().update(INSERT_LAYER_ENV, pk, layer.getLayerId(), layer.getJobId(),
-                e.getKey(), e.getValue());
-        }
-    }
-
-    @Override
-    public void insertLayerEnvironment(LayerInterface layer, String key, String value) {
-        String pk = SqlUtil.genKeyRandom();
-        getJdbcTemplate().update(INSERT_LAYER_ENV, pk, layer.getLayerId(), layer.getJobId(), key,
-            value);
-    }
-
-    @Override
-    public Map<String, String> getLayerEnvironment(LayerInterface layer) {
-        Map<String, String> result = new HashMap<String, String>();
-        List<Map<String, Object>> _result = getJdbcTemplate().queryForList(
-            "SELECT str_key, str_value FROM layer_env WHERE pk_layer=?", layer.getLayerId());
-
-        for (Map<String, Object> o : _result) {
-            result.put((String) o.get("str_key"), (String) o.get("str_value"));
-        }
-        return result;
-    }
-
-    private static final String FIND_PAST_MAX_RSS = "SELECT " + "layer_mem.int_max_rss " + "FROM "
-        + "layer, " + "layer_mem, " + "layer_stat " + "WHERE "
-        + "layer.pk_layer = layer_stat.pk_layer " + "AND " + "layer.pk_layer = layer_mem.pk_layer "
-        + "AND " + "layer.pk_job = ? " + "AND " + "layer.str_name = ? 
" + "AND " - + "layer_stat.int_succeeded_count >= ceil(layer_stat.int_total_count * .5) "; - - @Override - public long findPastMaxRSS(JobInterface job, String name) { - try { - long maxRss = - getJdbcTemplate().queryForObject(FIND_PAST_MAX_RSS, Long.class, job.getJobId(), name); - if (maxRss >= MEM_RESERVED_MIN) { - return maxRss; - } else { - return MEM_RESERVED_MIN; - } - } catch (EmptyResultDataAccessException e) { - // Actually want to return 0 here, which means - // there is no past history. - return 0; - } - } - - @Override - public void updateTags(JobInterface job, String tags, LayerType type) { - getJdbcTemplate().update("UPDATE layer SET str_tags=? WHERE pk_job=? AND str_type=?", tags, - job.getJobId(), type.toString()); - } - - @Override - public void updateMinMemory(JobInterface job, long mem, LayerType type) { - if (mem < MEM_RESERVED_MIN) { - mem = MEM_RESERVED_MIN; - } - getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_job=? AND str_type=?", mem, - job.getJobId(), type.toString()); - } - - @Override - public void updateMinGpuMemory(JobInterface job, long kb, LayerType type) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_job=? AND str_type=?", kb, - job.getJobId(), type.toString()); - } - - @Override - public void updateMinCores(JobInterface job, int cores, LayerType type) { - getJdbcTemplate().update("UPDATE layer SET int_cores_min=? WHERE pk_job=? AND str_type=?", - cores, job.getJobId(), type.toString()); - } - - @Override - public void updateMaxCores(JobInterface job, int cores, LayerType type) { - getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_job=? AND str_type=?", - cores, job.getJobId(), type.toString()); - } - - @Override - public void updateMinGpus(JobInterface job, int gpus, LayerType type) { - getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_job=? AND str_type=?", gpus, - job.getJobId(), type.toString()); - } - - @Override - public void updateThreadable(LayerInterface layer, boolean threadable) { - getJdbcTemplate().update("UPDATE layer SET b_threadable=? WHERE pk_layer=?", threadable, - layer.getLayerId()); - } - - @Override - public void updateTimeout(LayerInterface layer, int timeout) { - getJdbcTemplate().update("UPDATE layer SET int_timeout=? WHERE pk_layer=?", timeout, - layer.getLayerId()); - } - - @Override - public void updateTimeoutLLU(LayerInterface layer, int timeout_llu) { - getJdbcTemplate().update("UPDATE layer SET int_timeout_llu=? WHERE pk_layer=?", timeout_llu, - layer.getLayerId()); - } - - @Override - public void enableMemoryOptimizer(LayerInterface layer, boolean value) { - getJdbcTemplate().update("UPDATE layer SET b_optimize=? WHERE pk_layer=?", value, - layer.getLayerId()); - } - - private static final String IS_OPTIMIZABLE = "SELECT " + "COUNT(1) " + "FROM " + "layer, " - + "layer_stat, " + "layer_usage " + "WHERE " + "layer.pk_layer = layer_stat.pk_layer " - + "AND " + "layer.pk_layer = layer_usage.pk_layer " + "AND " + "layer.pk_layer = ? " + "AND " - + "layer.int_cores_min = 100 " + "AND " + "layer.int_gpus_min = 0 " + "AND " - + "str_tags LIKE '%general%' " + "AND " + "str_tags NOT LIKE '%util%' " + "AND " - + "layer_stat.int_succeeded_count >= ? 
" + "AND " - + "(layer_usage.int_core_time_success / layer_stat.int_succeeded_count) <= ?"; - - @Override - public boolean isOptimizable(LayerInterface l, int succeeded, float avg) { - if (succeeded < 1) { - throw new IllegalArgumentException("Succeeded frames option " + "must be greater than zero"); - } - return getJdbcTemplate().queryForObject(IS_OPTIMIZABLE, Integer.class, l.getLayerId(), - succeeded, avg) > 0; - } - - private static final String THREAD_STATS = - "SELECT " + "avg(interval_to_seconds(ts_stopped - ts_started)) AS avg, " + "int_cores, " - + "int_gpus " + "FROM " + "frame " + "WHERE " + "frame.pk_layer = ? " + "AND " - + "frame.int_checkpoint_count = 0 " + "AND " + "int_cores > 0 " + "AND " + "int_gpus > 0 " - + "GROUP BY " + "int_cores, " + "int_gpus " + "ORDER BY " + "int_cores DESC "; - - @Override - public List getThreadStats(LayerInterface layer) { - - return getJdbcTemplate().query(THREAD_STATS, new RowMapper() { - public ThreadStats mapRow(ResultSet rs, int rowNum) throws SQLException { - ThreadStats s = new ThreadStats(); - s.setThreads(rs.getInt("int_cores") / 100); - s.setAvgFrameTime(rs.getInt("avg")); - return s; - } - }, layer.getLayerId()); - } - - @Override - public void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus) { - - if (exitStatus == 0) { - - getJdbcTemplate().update( - "UPDATE " + "layer_usage " + "SET " + "int_core_time_success = int_core_time_success + ?," - + "int_gpu_time_success = int_gpu_time_success + ?," - + "int_clock_time_success = int_clock_time_success + ?," - + "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " - + "pk_layer = ? ", - usage.getCoreTimeSeconds(), usage.getGpuTimeSeconds(), usage.getClockTimeSeconds(), - layer.getLayerId()); - - getJdbcTemplate().update( - "UPDATE " + "layer_usage " + "SET " + "int_clock_time_high = ? " + "WHERE " - + "pk_layer = ? " + "AND " + "int_clock_time_high < ?", - usage.getClockTimeSeconds(), layer.getLayerId(), usage.getClockTimeSeconds()); - - getJdbcTemplate().update( - "UPDATE " + "layer_usage " + "SET " + "int_clock_time_low = ? " + "WHERE " - + "pk_layer = ? " + "AND " + "(? < int_clock_time_low OR int_clock_time_low = 0)", - usage.getClockTimeSeconds(), layer.getLayerId(), usage.getClockTimeSeconds()); - } else { - getJdbcTemplate().update( - "UPDATE " + "layer_usage " + "SET " + "int_core_time_fail = int_core_time_fail + ?," - + "int_clock_time_fail = int_clock_time_fail + ?," - + "int_frame_fail_count = int_frame_fail_count + 1 " + "WHERE " + "pk_layer = ? ", - usage.getCoreTimeSeconds(), usage.getClockTimeSeconds(), layer.getLayerId()); - } - } - - private static final String INSERT_LIMIT = - "INSERT INTO " + "layer_limit (pk_layer_limit,pk_layer,pk_limit_record)" + "VALUES (?,?,?)"; - - private static final String GET_LIMITS = "SELECT " + "limit_record.pk_limit_record, " - + "limit_record.str_name, " + "limit_record.int_max_value " + "FROM " + "layer_limit," - + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? " - + "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; - - private static final String GET_LIMIT_NAMES = "SELECT " + "limit_record.str_name " + "FROM " - + "layer_limit, " + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? 
" - + "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; - - private static final RowMapper LIMIT_MAPPER = new RowMapper() { - public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LimitEntity limit = new LimitEntity(); - limit.id = rs.getString("pk_limit_record"); - limit.name = rs.getString("str_name"); - limit.maxValue = rs.getInt("int_max_value"); - return limit; - } - }; - - private static final RowMapper LIMIT_NAME_MAPPER = new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_name"); - } - }; - - @Override - public void addLimit(LayerInterface layer, String limitId) { - getJdbcTemplate().update(INSERT_LIMIT, UUID.randomUUID().toString(), layer.getLayerId(), - limitId); - } - - @Override - public void dropLimit(LayerInterface layer, String limitId) { - getJdbcTemplate().update("DELETE FROM layer_limit WHERE pk_limit_record = ? AND pk_layer = ?", - limitId, layer.getLayerId()); - } - - @Override - public List getLimits(LayerInterface layer) { - return getJdbcTemplate().query(GET_LIMITS, LIMIT_MAPPER, layer.getLayerId()); - } - - @Override - public List getLimitNames(LayerInterface layer) { - return getJdbcTemplate().query(GET_LIMIT_NAMES, LIMIT_NAME_MAPPER, layer.getLayerId()); - } + } + + @Override + public void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus) { + + if (exitStatus == 0) { + + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + + "int_core_time_success = int_core_time_success + ?," + + "int_gpu_time_success = int_gpu_time_success + ?," + + "int_clock_time_success = int_clock_time_success + ?," + + "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + + "pk_layer = ? ", + usage.getCoreTimeSeconds(), usage.getGpuTimeSeconds(), + usage.getClockTimeSeconds(), layer.getLayerId()); + + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + "int_clock_time_high = ? " + "WHERE " + + "pk_layer = ? " + "AND " + "int_clock_time_high < ?", + usage.getClockTimeSeconds(), layer.getLayerId(), usage.getClockTimeSeconds()); + + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + "int_clock_time_low = ? " + "WHERE " + + "pk_layer = ? " + "AND " + + "(? < int_clock_time_low OR int_clock_time_low = 0)", + usage.getClockTimeSeconds(), layer.getLayerId(), usage.getClockTimeSeconds()); + } else { + getJdbcTemplate().update( + "UPDATE " + "layer_usage " + "SET " + + "int_core_time_fail = int_core_time_fail + ?," + + "int_clock_time_fail = int_clock_time_fail + ?," + + "int_frame_fail_count = int_frame_fail_count + 1 " + "WHERE " + + "pk_layer = ? ", + usage.getCoreTimeSeconds(), usage.getClockTimeSeconds(), layer.getLayerId()); + } + } + + private static final String INSERT_LIMIT = "INSERT INTO " + + "layer_limit (pk_layer_limit,pk_layer,pk_limit_record)" + "VALUES (?,?,?)"; + + private static final String GET_LIMITS = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value " + "FROM " + "layer_limit," + + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? " + + "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; + + private static final String GET_LIMIT_NAMES = "SELECT " + "limit_record.str_name " + "FROM " + + "layer_limit, " + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? 
" + + "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; + + private static final RowMapper LIMIT_MAPPER = new RowMapper() { + public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + LimitEntity limit = new LimitEntity(); + limit.id = rs.getString("pk_limit_record"); + limit.name = rs.getString("str_name"); + limit.maxValue = rs.getInt("int_max_value"); + return limit; + } + }; + + private static final RowMapper LIMIT_NAME_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("str_name"); + } + }; + + @Override + public void addLimit(LayerInterface layer, String limitId) { + getJdbcTemplate().update(INSERT_LIMIT, UUID.randomUUID().toString(), layer.getLayerId(), + limitId); + } + + @Override + public void dropLimit(LayerInterface layer, String limitId) { + getJdbcTemplate().update( + "DELETE FROM layer_limit WHERE pk_limit_record = ? AND pk_layer = ?", limitId, + layer.getLayerId()); + } + + @Override + public List getLimits(LayerInterface layer) { + return getJdbcTemplate().query(GET_LIMITS, LIMIT_MAPPER, layer.getLayerId()); + } + + @Override + public List getLimitNames(LayerInterface layer) { + return getJdbcTemplate().query(GET_LIMIT_NAMES, LIMIT_NAME_MAPPER, layer.getLayerId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java index 6e623c7fa..494470600 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LimitDaoJdbc.java @@ -28,65 +28,65 @@ public class LimitDaoJdbc extends JdbcDaoSupport implements LimitDao { - public static final RowMapper LIMIT_MAPPER = new RowMapper() { - public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LimitEntity limit = new LimitEntity(); - limit.id = rs.getString("pk_limit_record"); - limit.name = rs.getString("str_name"); - limit.maxValue = rs.getInt("int_max_value"); - limit.currentRunning = rs.getInt("int_current_running"); - return limit; - } - }; + public static final RowMapper LIMIT_MAPPER = new RowMapper() { + public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + LimitEntity limit = new LimitEntity(); + limit.id = rs.getString("pk_limit_record"); + limit.name = rs.getString("str_name"); + limit.maxValue = rs.getInt("int_max_value"); + limit.currentRunning = rs.getInt("int_current_running"); + return limit; + } + }; - @Override - public String createLimit(String name, int maxValue) { - String limitId = SqlUtil.genKeyRandom(); - getJdbcTemplate().update("INSERT INTO " + "limit_record " - + "(pk_limit_record,str_name, int_max_value) " + "VALUES " + "(?,?,?)", limitId, name, - maxValue); - return limitId; - } + @Override + public String createLimit(String name, int maxValue) { + String limitId = SqlUtil.genKeyRandom(); + getJdbcTemplate().update("INSERT INTO " + "limit_record " + + "(pk_limit_record,str_name, int_max_value) " + "VALUES " + "(?,?,?)", limitId, + name, maxValue); + return limitId; + } - @Override - public void deleteLimit(LimitInterface limit) { - getJdbcTemplate().update("DELETE FROM " + "limit_record " + "WHERE " + "pk_limit_record=?", - limit.getId()); - } + @Override + public void deleteLimit(LimitInterface limit) { + getJdbcTemplate().update("DELETE FROM " + "limit_record " + "WHERE " + "pk_limit_record=?", + limit.getId()); + } - @Override - public LimitEntity findLimit(String 
name) { - String findLimitQuery = GET_LIMIT_BASE + "WHERE " + "limit_record.str_name=? " + "GROUP BY " - + "limit_record.str_name, " + "limit_record.pk_limit_record, " - + "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); - } + @Override + public LimitEntity findLimit(String name) { + String findLimitQuery = GET_LIMIT_BASE + "WHERE " + "limit_record.str_name=? " + "GROUP BY " + + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); + } - @Override - public LimitEntity getLimit(String id) { - String getLimitQuery = GET_LIMIT_BASE + "WHERE " + "limit_record.pk_limit_record=? " - + "GROUP BY " + "limit_record.str_name, " + "limit_record.pk_limit_record, " - + "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); - } + @Override + public LimitEntity getLimit(String id) { + String getLimitQuery = GET_LIMIT_BASE + "WHERE " + "limit_record.pk_limit_record=? " + + "GROUP BY " + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); + } - @Override - public void setLimitName(LimitInterface limit, String name) { - getJdbcTemplate().update( - "UPDATE " + "limit_record " + "SET " + "str_name = ? " + "WHERE " + "pk_limit_record = ?", - name, limit.getId()); - } + @Override + public void setLimitName(LimitInterface limit, String name) { + getJdbcTemplate().update("UPDATE " + "limit_record " + "SET " + "str_name = ? " + "WHERE " + + "pk_limit_record = ?", name, limit.getId()); + } - public void setMaxValue(LimitInterface limit, int maxValue) { - getJdbcTemplate().update("UPDATE " + "limit_record " + "SET " + "int_max_value = ? " + "WHERE " - + "pk_limit_record = ?", maxValue, limit.getId()); - } + public void setMaxValue(LimitInterface limit, int maxValue) { + getJdbcTemplate().update("UPDATE " + "limit_record " + "SET " + "int_max_value = ? 
" + + "WHERE " + "pk_limit_record = ?", maxValue, limit.getId()); + } - private static final String GET_LIMIT_BASE = "SELECT " + "limit_record.pk_limit_record, " - + "limit_record.str_name, " + "limit_record.int_max_value," - + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + "limit_record " - + "LEFT JOIN " + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " - + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " - + "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; + private static final String GET_LIMIT_BASE = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value," + + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + + "limit_record " + "LEFT JOIN " + + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " + + "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java index 84d7943d6..50ca6bfba 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MaintenanceDaoJdbc.java @@ -23,35 +23,36 @@ public class MaintenanceDaoJdbc extends JdbcDaoSupport implements MaintenanceDao { - private static final String HOST_DOWN_INTERVAL = "interval '300' second"; - - private static final String UPDATE_HOSTS_DOWN = - "UPDATE " + "host_stat " + "SET " + "str_state = ? " + "WHERE " + "str_state = 'UP' " + "AND " - + "current_timestamp - ts_ping > " + HOST_DOWN_INTERVAL; - - public int setUpHostsToDown() { - return getJdbcTemplate().update(UPDATE_HOSTS_DOWN, HardwareState.DOWN.toString()); - } - - public static final String LOCK_TASK = - "UPDATE " + "task_lock " + "SET " + "int_lock = ?, " + "ts_lastrun = current_timestamp " - + "WHERE " + "str_name = ? " + "AND " + "(int_lock = ? OR ? - int_lock > int_timeout)"; - - public boolean lockTask(MaintenanceTask task) { - long now = System.currentTimeMillis(); - return getJdbcTemplate().update(LOCK_TASK, now, task.toString(), 0, now) == 1; - } - - public static final String LOCK_TASK_MIN = "UPDATE " + "task_lock " + "SET " + "int_lock = ?, " - + "ts_lastrun = current_timestamp " + "WHERE " + "str_name= ? " + "AND " + "int_lock = ? " - + "AND " + "interval_to_seconds(current_timestamp - ts_lastrun) > ? "; - - public boolean lockTask(MaintenanceTask task, int minutes) { - long now = System.currentTimeMillis(); - return getJdbcTemplate().update(LOCK_TASK_MIN, now, task.toString(), 0, minutes * 60) == 1; - } - - public void unlockTask(MaintenanceTask task) { - getJdbcTemplate().update("UPDATE task_lock SET int_lock = 0 WHERE str_name=?", task.toString()); - } + private static final String HOST_DOWN_INTERVAL = "interval '300' second"; + + private static final String UPDATE_HOSTS_DOWN = + "UPDATE " + "host_stat " + "SET " + "str_state = ? " + "WHERE " + "str_state = 'UP' " + + "AND " + "current_timestamp - ts_ping > " + HOST_DOWN_INTERVAL; + + public int setUpHostsToDown() { + return getJdbcTemplate().update(UPDATE_HOSTS_DOWN, HardwareState.DOWN.toString()); + } + + public static final String LOCK_TASK = "UPDATE " + "task_lock " + "SET " + "int_lock = ?, " + + "ts_lastrun = current_timestamp " + "WHERE " + "str_name = ? 
" + "AND " + + "(int_lock = ? OR ? - int_lock > int_timeout)"; + + public boolean lockTask(MaintenanceTask task) { + long now = System.currentTimeMillis(); + return getJdbcTemplate().update(LOCK_TASK, now, task.toString(), 0, now) == 1; + } + + public static final String LOCK_TASK_MIN = "UPDATE " + "task_lock " + "SET " + "int_lock = ?, " + + "ts_lastrun = current_timestamp " + "WHERE " + "str_name= ? " + "AND " + + "int_lock = ? " + "AND " + "interval_to_seconds(current_timestamp - ts_lastrun) > ? "; + + public boolean lockTask(MaintenanceTask task, int minutes) { + long now = System.currentTimeMillis(); + return getJdbcTemplate().update(LOCK_TASK_MIN, now, task.toString(), 0, minutes * 60) == 1; + } + + public void unlockTask(MaintenanceTask task) { + getJdbcTemplate().update("UPDATE task_lock SET int_lock = 0 WHERE str_name=?", + task.toString()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java index 0f5f95184..f50af2e5f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/MatcherDaoJdbc.java @@ -32,56 +32,58 @@ public class MatcherDaoJdbc extends JdbcDaoSupport implements MatcherDao { - private static final String INSERT_MATCHER = "INSERT INTO " + "matcher " + "( " - + "pk_matcher,pk_filter,str_subject,str_match,str_value" + ") VALUES (?,?,?,?,?)"; - - public void insertMatcher(MatcherEntity matcher) { - matcher.id = SqlUtil.genKeyRandom(); - - getJdbcTemplate().update(INSERT_MATCHER, matcher.id, matcher.getFilterId(), - matcher.subject.toString(), matcher.type.toString(), matcher.value); - } - - public void deleteMatcher(MatcherInterface matcher) { - getJdbcTemplate().update("DELETE FROM matcher WHERE pk_matcher=?", matcher.getMatcherId()); - } - - private static final String GET_MATCHER = "SELECT " + "matcher.*, " + "filter.pk_show " + "FROM " - + "matcher, " + "filter " + "WHERE " + "matcher.pk_filter = filter.pk_filter"; - - public MatcherEntity getMatcher(String id) { - return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_DETAIL_MAPPER, id); - } - - public MatcherEntity getMatcher(MatcherInterface matcher) { - return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_DETAIL_MAPPER, matcher.getMatcherId()); - } - - public List getMatchers(FilterInterface filter) { - return getJdbcTemplate().query(GET_MATCHER + " AND filter.pk_filter=? ORDER BY ts_created ASC", - MATCHER_DETAIL_MAPPER, filter.getFilterId()); - } - - public void updateMatcher(MatcherEntity matcher) { - getJdbcTemplate().update( - "UPDATE matcher SET str_subject=?,str_match=?,str_value=? 
WHERE pk_matcher=?", - matcher.subject.toString(), matcher.type.toString(), matcher.value, matcher.getMatcherId()); - } - - public static final RowMapper MATCHER_DETAIL_MAPPER = - new RowMapper() { - public MatcherEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - MatcherEntity matcher = new MatcherEntity(); - matcher.id = rs.getString("pk_matcher"); - matcher.showId = rs.getString("pk_show"); - matcher.filterId = rs.getString("pk_filter"); - matcher.name = null; - matcher.subject = MatchSubject.valueOf(rs.getString("str_subject")); - matcher.type = MatchType.valueOf(rs.getString("str_match")); - matcher.value = rs.getString("str_value"); - return matcher; - } - }; + private static final String INSERT_MATCHER = "INSERT INTO " + "matcher " + "( " + + "pk_matcher,pk_filter,str_subject,str_match,str_value" + ") VALUES (?,?,?,?,?)"; + + public void insertMatcher(MatcherEntity matcher) { + matcher.id = SqlUtil.genKeyRandom(); + + getJdbcTemplate().update(INSERT_MATCHER, matcher.id, matcher.getFilterId(), + matcher.subject.toString(), matcher.type.toString(), matcher.value); + } + + public void deleteMatcher(MatcherInterface matcher) { + getJdbcTemplate().update("DELETE FROM matcher WHERE pk_matcher=?", matcher.getMatcherId()); + } + + private static final String GET_MATCHER = "SELECT " + "matcher.*, " + "filter.pk_show " + + "FROM " + "matcher, " + "filter " + "WHERE " + "matcher.pk_filter = filter.pk_filter"; + + public MatcherEntity getMatcher(String id) { + return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", + MATCHER_DETAIL_MAPPER, id); + } + + public MatcherEntity getMatcher(MatcherInterface matcher) { + return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", + MATCHER_DETAIL_MAPPER, matcher.getMatcherId()); + } + + public List getMatchers(FilterInterface filter) { + return getJdbcTemplate().query( + GET_MATCHER + " AND filter.pk_filter=? ORDER BY ts_created ASC", + MATCHER_DETAIL_MAPPER, filter.getFilterId()); + } + + public void updateMatcher(MatcherEntity matcher) { + getJdbcTemplate().update( + "UPDATE matcher SET str_subject=?,str_match=?,str_value=? 
WHERE pk_matcher=?", + matcher.subject.toString(), matcher.type.toString(), matcher.value, + matcher.getMatcherId()); + } + + public static final RowMapper MATCHER_DETAIL_MAPPER = + new RowMapper() { + public MatcherEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + MatcherEntity matcher = new MatcherEntity(); + matcher.id = rs.getString("pk_matcher"); + matcher.showId = rs.getString("pk_show"); + matcher.filterId = rs.getString("pk_filter"); + matcher.name = null; + matcher.subject = MatchSubject.valueOf(rs.getString("str_subject")); + matcher.type = MatchType.valueOf(rs.getString("str_match")); + matcher.value = rs.getString("str_value"); + return matcher; + } + }; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java index 6d2a0853a..45767b4e2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java @@ -43,362 +43,384 @@ public class NestedWhiteboardDaoJdbc extends JdbcDaoSupport implements NestedWhiteboardDao { - private class CachedJobWhiteboardMapper { - public final long time; - public NestedJobWhiteboardMapper mapper; + private class CachedJobWhiteboardMapper { + public final long time; + public NestedJobWhiteboardMapper mapper; - public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { - this.mapper = result; - this.time = System.currentTimeMillis(); - } - } - - private static final int CACHE_TIMEOUT = 5000; - private final ConcurrentHashMap jobCache = - new ConcurrentHashMap(20); - - public static final String GET_NESTED_GROUPS = "SELECT " + "show.pk_show, " - + "show.str_name AS str_show, " + "facility.str_name AS facility_name, " - + "dept.str_name AS dept_name, " + "folder.pk_folder, " + "folder.pk_parent_folder, " - + "folder.str_name AS group_name, " + "folder.int_job_priority as int_def_job_priority, " - + "folder.int_job_min_cores as int_def_job_min_cores, " - + "folder.int_job_max_cores as int_def_job_max_cores, " - + "folder.int_job_min_gpus as int_def_job_min_gpus, " - + "folder.int_job_max_gpus as int_def_job_max_gpus, " - + "folder_resource.int_min_cores AS folder_min_cores, " - + "folder_resource.int_max_cores AS folder_max_cores, " - + "folder_resource.int_min_gpus AS folder_min_gpus, " - + "folder_resource.int_max_gpus AS folder_max_gpus, " + "folder_level.int_level, " - + "job.pk_job, " + "job.str_name, " + "job.str_shot, " + "job.str_user, " + "job.str_state, " - + "job.str_log_dir, " + "job.int_uid, " + "job_resource.int_priority, " + "job.ts_started, " - + "job.ts_stopped, " + "job.ts_updated, " + "job.b_paused, " + "job.b_autoeat, " - + "job.b_comment, " + "COALESCE(str_os, '') AS str_os, " + "job.int_frame_count, " - + "job.int_layer_count, " + "job_stat.int_waiting_count, " + "job_stat.int_running_count, " - + "job_stat.int_dead_count, " + "job_stat.int_eaten_count," + "job_stat.int_depend_count, " - + "job_stat.int_succeeded_count, " + "job_usage.int_core_time_success, " - + "job_usage.int_core_time_fail, " + "job_usage.int_gpu_time_success, " - + "job_usage.int_gpu_time_fail, " + "job_usage.int_frame_success_count, " - + "job_usage.int_frame_fail_count, " + "job_usage.int_clock_time_high, " - + "job_usage.int_clock_time_success, " - + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores, " - + "(job_resource.int_gpus + job_resource.int_local_gpus) AS 
int_gpus, " - + "job_resource.int_min_cores, " + "job_resource.int_min_gpus, " - + "job_resource.int_max_cores, " + "job_resource.int_max_gpus, " + "job_mem.int_max_rss " - + "FROM " + "show, " + "dept, " + "folder_level, " + "folder_resource, " + "folder " - + "LEFT JOIN " + "job " + "ON " - + " (folder.pk_folder = job.pk_folder AND job.str_state='PENDING') " + "LEFT JOIN " - + "facility " + "ON " + "(job.pk_facility = facility.pk_facility) " + "LEFT JOIN " - + "job_stat " + "ON " + "(job.pk_job = job_stat.pk_job) " + "LEFT JOIN " + "job_resource " - + "ON " + "(job.pk_job = job_resource.pk_job) " + "LEFT JOIN " + "job_usage " + "ON " - + "(job.pk_job = job_usage.pk_job) " + "LEFT JOIN " + "job_mem " + "ON " - + "(job.pk_job = job_mem.pk_job) " + "WHERE " + "show.pk_show = folder.pk_show " + "AND " - + "folder.pk_folder = folder_level.pk_folder " + "AND " - + "folder.pk_folder = folder_resource.pk_folder " + "AND " + "folder.pk_dept = dept.pk_dept "; - - private class ChildrenEntry { - String key; - int level; - List children; - String name; - - public ChildrenEntry(String key, int level, String name) { - this.key = key; - this.level = level; - this.children = new ArrayList<>(); - this.name = name; - } - - public List getChildren() { - return children; + public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { + this.mapper = result; + this.time = System.currentTimeMillis(); + } } - public void addChild(String child) { - children.add(child); - } + private static final int CACHE_TIMEOUT = 5000; + private final ConcurrentHashMap jobCache = + new ConcurrentHashMap(20); + + public static final String GET_NESTED_GROUPS = "SELECT " + "show.pk_show, " + + "show.str_name AS str_show, " + "facility.str_name AS facility_name, " + + "dept.str_name AS dept_name, " + "folder.pk_folder, " + "folder.pk_parent_folder, " + + "folder.str_name AS group_name, " + + "folder.int_job_priority as int_def_job_priority, " + + "folder.int_job_min_cores as int_def_job_min_cores, " + + "folder.int_job_max_cores as int_def_job_max_cores, " + + "folder.int_job_min_gpus as int_def_job_min_gpus, " + + "folder.int_job_max_gpus as int_def_job_max_gpus, " + + "folder_resource.int_min_cores AS folder_min_cores, " + + "folder_resource.int_max_cores AS folder_max_cores, " + + "folder_resource.int_min_gpus AS folder_min_gpus, " + + "folder_resource.int_max_gpus AS folder_max_gpus, " + "folder_level.int_level, " + + "job.pk_job, " + "job.str_name, " + "job.str_shot, " + "job.str_user, " + + "job.str_state, " + "job.str_log_dir, " + "job.int_uid, " + + "job_resource.int_priority, " + "job.ts_started, " + "job.ts_stopped, " + + "job.ts_updated, " + "job.b_paused, " + "job.b_autoeat, " + "job.b_comment, " + + "COALESCE(str_os, '') AS str_os, " + "job.int_frame_count, " + "job.int_layer_count, " + + "job_stat.int_waiting_count, " + "job_stat.int_running_count, " + + "job_stat.int_dead_count, " + "job_stat.int_eaten_count," + + "job_stat.int_depend_count, " + "job_stat.int_succeeded_count, " + + "job_usage.int_core_time_success, " + "job_usage.int_core_time_fail, " + + "job_usage.int_gpu_time_success, " + "job_usage.int_gpu_time_fail, " + + "job_usage.int_frame_success_count, " + "job_usage.int_frame_fail_count, " + + "job_usage.int_clock_time_high, " + "job_usage.int_clock_time_success, " + + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores, " + + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus, " + + "job_resource.int_min_cores, " + "job_resource.int_min_gpus, " + + 
"job_resource.int_max_cores, " + "job_resource.int_max_gpus, " + + "job_mem.int_max_rss " + "FROM " + "show, " + "dept, " + "folder_level, " + + "folder_resource, " + "folder " + "LEFT JOIN " + "job " + "ON " + + " (folder.pk_folder = job.pk_folder AND job.str_state='PENDING') " + "LEFT JOIN " + + "facility " + "ON " + "(job.pk_facility = facility.pk_facility) " + "LEFT JOIN " + + "job_stat " + "ON " + "(job.pk_job = job_stat.pk_job) " + "LEFT JOIN " + + "job_resource " + "ON " + "(job.pk_job = job_resource.pk_job) " + "LEFT JOIN " + + "job_usage " + "ON " + "(job.pk_job = job_usage.pk_job) " + "LEFT JOIN " + "job_mem " + + "ON " + "(job.pk_job = job_mem.pk_job) " + "WHERE " + "show.pk_show = folder.pk_show " + + "AND " + "folder.pk_folder = folder_level.pk_folder " + "AND " + + "folder.pk_folder = folder_resource.pk_folder " + "AND " + + "folder.pk_dept = dept.pk_dept "; + + private class ChildrenEntry { + String key; + int level; + List children; + String name; + + public ChildrenEntry(String key, int level, String name) { + this.key = key; + this.level = level; + this.children = new ArrayList<>(); + this.name = name; + } - public String getKey() { - return key; - } + public List getChildren() { + return children; + } - public String getName() { - return name; - } + public void addChild(String child) { + children.add(child); + } - public int compareTo(ChildrenEntry o) { - // Invert order - return Integer.compare(o.level, this.level); - } + public String getKey() { + return key; + } - @Override - public String toString() { - StringBuilder out = new StringBuilder(); - String spacing = " ".repeat(Math.max(0, this.level + 1)); - out.append(spacing); - out.append(key + "(c " + name + ")"); - for (String id : children) { - out.append("\n " + spacing + id.substring(0, 4)); - } - return out.toString(); - } - } - - class NestedJobWhiteboardMapper implements RowMapper { - public Map groups = new HashMap(50); - public Map childrenMap = new HashMap(); - public String rootGroupID; - - @Override - public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { - String groupId = rs.getString("pk_folder"); - NestedGroup group; - if (!groups.containsKey(groupId)) { - group = NestedGroup.newBuilder().setId(rs.getString("pk_folder")) - .setName(rs.getString("group_name")) - .setDefaultJobPriority(rs.getInt("int_def_job_priority")) - .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_min_cores"))) - .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_max_cores"))) - .setDefaultJobMinGpus(rs.getInt("int_def_job_min_gpus")) - .setDefaultJobMaxGpus(rs.getInt("int_def_job_max_gpus")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("folder_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("folder_min_cores"))) - .setMaxGpus(rs.getInt("folder_max_gpus")).setMinGpus(rs.getInt("folder_min_gpus")) - .setLevel(rs.getInt("int_level")).setDepartment(rs.getString("dept_name")).build(); - - String parentGroupId = rs.getString("pk_parent_folder"); - if (parentGroupId != null) { - ChildrenEntry childrenEntry = childrenMap.get(parentGroupId); - if (childrenEntry == null) { - childrenEntry = - new ChildrenEntry(parentGroupId, group.getLevel() - 1, rs.getString("group_name")); - childrenEntry.addChild(groupId); - childrenMap.put(parentGroupId, childrenEntry); - } else { - childrenEntry.addChild(groupId); - } - } else { - rootGroupID = rs.getString("pk_folder"); + public String getName() { + return name; } - groups.put(groupId, group); - } else { - group = 
groups.get(groupId); - } - if (rs.getString("pk_job") != null) { - GroupStats oldStats = group.getStats(); - JobStats jobStats = WhiteboardDaoJdbc.mapJobStats(rs); - GroupStats groupStats = GroupStats.newBuilder() - .setDeadFrames(oldStats.getDeadFrames() + jobStats.getDeadFrames()) - .setRunningFrames(oldStats.getRunningFrames() + jobStats.getRunningFrames()) - .setWaitingFrames(oldStats.getWaitingFrames() + jobStats.getWaitingFrames()) - .setDependFrames(oldStats.getDependFrames() + jobStats.getDependFrames()) - .setReservedCores(oldStats.getReservedCores() + jobStats.getReservedCores()) - .setPendingJobs(oldStats.getPendingJobs() + 1).build(); - - group = group.toBuilder().setStats(groupStats).addJobs(rs.getString("pk_job")).build(); - groups.put(groupId, group); - } - return group; - } - } - - private NestedJobWhiteboardMapper updateConnections(NestedJobWhiteboardMapper mapper) { - ArrayList orderedChildren = new ArrayList<>(mapper.childrenMap.values()); - orderedChildren.sort(ChildrenEntry::compareTo); - - for (ChildrenEntry entry : orderedChildren) { - NestedGroup group = mapper.groups.get(entry.getKey()); - NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); - for (String childId : entry.getChildren()) { - NestedGroup child = mapper.groups.get(childId); - child = child.toBuilder().setParent(group).build(); - childrenBuilder.addNestedGroups(child); - mapper.groups.put(childId, child); - } - group = group.toBuilder().setGroups(childrenBuilder.build()).build(); - mapper.groups.put(entry.getKey(), group); - } - return mapper; - } - public NestedGroup getJobWhiteboard(ShowInterface show) { + public int compareTo(ChildrenEntry o) { + // Invert order + return Integer.compare(o.level, this.level); + } - CachedJobWhiteboardMapper cachedMapper = jobCache.get(show.getShowId()); - if (cachedMapper != null) { - if (System.currentTimeMillis() - cachedMapper.time < CACHE_TIMEOUT) { - return cachedMapper.mapper.groups.get(cachedMapper.mapper.rootGroupID); - } + @Override + public String toString() { + StringBuilder out = new StringBuilder(); + String spacing = " ".repeat(Math.max(0, this.level + 1)); + out.append(spacing); + out.append(key + "(c " + name + ")"); + for (String id : children) { + out.append("\n " + spacing + id.substring(0, 4)); + } + return out.toString(); + } } - NestedJobWhiteboardMapper mapper = new NestedJobWhiteboardMapper(); - getJdbcTemplate().query( - GET_NESTED_GROUPS + " AND show.pk_show=? 
ORDER BY folder_level.int_level ASC", mapper, - show.getShowId()); - - mapper = updateConnections(mapper); - jobCache.put(show.getShowId(), new CachedJobWhiteboardMapper(mapper)); - return mapper.groups.get(mapper.rootGroupID); - } - - private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLException { - - NestedJob.Builder jobBuilder = NestedJob.newBuilder().setId(rs.getString("pk_job")) - .setLogDir(rs.getString("str_log_dir")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setMaxGpus(rs.getInt("int_max_cores")).setMinGpus(rs.getInt("int_min_cores")) - .setName(rs.getString("str_name")).setPriority(rs.getInt("int_priority")) - .setShot(rs.getString("str_shot")).setShow(rs.getString("str_show")) - .setOs(rs.getString("str_os")).setFacility(rs.getString("facility_name")) - .setGroup(rs.getString("group_name")).setState(JobState.valueOf(rs.getString("str_state"))) - .setUser(rs.getString("str_user")).setIsPaused(rs.getBoolean("b_paused")) - .setHasComment(rs.getBoolean("b_comment")).setAutoEat(rs.getBoolean("b_autoeat")) - .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) - .setStats(WhiteboardDaoJdbc.mapJobStats(rs)); - - int uid = rs.getInt("int_uid"); - if (!rs.wasNull()) { - jobBuilder.setUid(uid); + class NestedJobWhiteboardMapper implements RowMapper { + public Map groups = new HashMap(50); + public Map childrenMap = new HashMap(); + public String rootGroupID; + + @Override + public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { + String groupId = rs.getString("pk_folder"); + NestedGroup group; + if (!groups.containsKey(groupId)) { + group = NestedGroup.newBuilder().setId(rs.getString("pk_folder")) + .setName(rs.getString("group_name")) + .setDefaultJobPriority(rs.getInt("int_def_job_priority")) + .setDefaultJobMinCores( + Convert.coreUnitsToCores(rs.getInt("int_def_job_min_cores"))) + .setDefaultJobMaxCores( + Convert.coreUnitsToCores(rs.getInt("int_def_job_max_cores"))) + .setDefaultJobMinGpus(rs.getInt("int_def_job_min_gpus")) + .setDefaultJobMaxGpus(rs.getInt("int_def_job_max_gpus")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("folder_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("folder_min_cores"))) + .setMaxGpus(rs.getInt("folder_max_gpus")) + .setMinGpus(rs.getInt("folder_min_gpus")).setLevel(rs.getInt("int_level")) + .setDepartment(rs.getString("dept_name")).build(); + + String parentGroupId = rs.getString("pk_parent_folder"); + if (parentGroupId != null) { + ChildrenEntry childrenEntry = childrenMap.get(parentGroupId); + if (childrenEntry == null) { + childrenEntry = new ChildrenEntry(parentGroupId, group.getLevel() - 1, + rs.getString("group_name")); + childrenEntry.addChild(groupId); + childrenMap.put(parentGroupId, childrenEntry); + } else { + childrenEntry.addChild(groupId); + } + } else { + rootGroupID = rs.getString("pk_folder"); + } + groups.put(groupId, group); + } else { + group = groups.get(groupId); + } + if (rs.getString("pk_job") != null) { + GroupStats oldStats = group.getStats(); + JobStats jobStats = WhiteboardDaoJdbc.mapJobStats(rs); + GroupStats groupStats = GroupStats.newBuilder() + .setDeadFrames(oldStats.getDeadFrames() + jobStats.getDeadFrames()) + .setRunningFrames(oldStats.getRunningFrames() + jobStats.getRunningFrames()) + .setWaitingFrames(oldStats.getWaitingFrames() + jobStats.getWaitingFrames()) + .setDependFrames(oldStats.getDependFrames() + jobStats.getDependFrames()) + 
.setReservedCores(oldStats.getReservedCores() + jobStats.getReservedCores()) + .setPendingJobs(oldStats.getPendingJobs() + 1).build(); + + group = group.toBuilder().setStats(groupStats).addJobs(rs.getString("pk_job")) + .build(); + groups.put(groupId, group); + } + return group; + } } - Timestamp ts = rs.getTimestamp("ts_stopped"); - if (ts != null) { - jobBuilder.setStopTime((int) (ts.getTime() / 1000)); - } else { - jobBuilder.setStopTime(0); + private NestedJobWhiteboardMapper updateConnections(NestedJobWhiteboardMapper mapper) { + ArrayList orderedChildren = new ArrayList<>(mapper.childrenMap.values()); + orderedChildren.sort(ChildrenEntry::compareTo); + + for (ChildrenEntry entry : orderedChildren) { + NestedGroup group = mapper.groups.get(entry.getKey()); + NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); + for (String childId : entry.getChildren()) { + NestedGroup child = mapper.groups.get(childId); + child = child.toBuilder().setParent(group).build(); + childrenBuilder.addNestedGroups(child); + mapper.groups.put(childId, child); + } + group = group.toBuilder().setGroups(childrenBuilder.build()).build(); + mapper.groups.put(entry.getKey(), group); + } + return mapper; } - return jobBuilder.build(); - } - - private static final String GET_HOSTS = "SELECT " + "alloc.str_name AS alloc_name, " - + "host.pk_host, " + "host.str_name AS host_name, " + "host_stat.str_state AS host_state, " - + "host.b_nimby, " + "host_stat.ts_booted, " + "host_stat.ts_ping, " + "host.int_cores, " - + "host.int_cores_idle, " + "host.int_gpus, " + "host.int_gpus_idle, " + "host.int_gpu_mem, " - + "host.int_gpu_mem_idle, " + "host.int_mem, " + "host.int_mem_idle, " - + "host.str_lock_state, " + "host.str_tags, " + "host.b_comment, " + "host.int_thread_mode, " - + "host_stat.str_os, " + "host_stat.int_mem_total, " + "host_stat.int_mem_free, " - + "host_stat.int_swap_total, " + "host_stat.int_swap_free, " + "host_stat.int_mcp_total, " - + "host_stat.int_mcp_free, " + "host_stat.int_gpu_mem_total, " - + "host_stat.int_gpu_mem_free, " + "host_stat.int_load, " + "proc.pk_proc, " - + "proc.int_cores_reserved AS proc_cores, " + "proc.int_gpus_reserved AS proc_gpus, " - + "proc.int_mem_reserved AS proc_memory, " + "proc.int_mem_used AS used_memory, " - + "proc.int_mem_max_used AS max_memory, " + "proc.int_gpu_mem_reserved AS proc_gpu_memory, " - + "proc.ts_ping, " + "proc.ts_booked, " + "proc.ts_dispatched, " + "proc.b_unbooked, " - + "redirect.str_name AS str_redirect, " + "job.str_name AS job_name, " + "job.str_log_dir, " - + "show.str_name AS show_name, " + "frame.str_name AS frame_name " + "FROM " + "alloc, " - + "host_stat, " + "host " + "LEFT JOIN " + "proc " + "ON " + "(proc.pk_host = host.pk_host) " - + "LEFT JOIN " + "frame " + "ON " + "(proc.pk_frame = frame.pk_frame) " + "LEFT JOIN " - + "job " + "ON " + "(proc.pk_job = job.pk_job) " + "LEFT JOIN " + "show " + "ON " - + "(proc.pk_show = show.pk_show) " + "LEFT JOIN " + "redirect " + "ON " - + "(proc.pk_proc = redirect.pk_proc) " + "WHERE " + "host.pk_alloc = alloc.pk_alloc " + "AND " - + "host.pk_host = host_stat.pk_host "; - - /** - * Caches a the host whiteboard. This class is not thread safe so you have to synchronize calls to - * the "cache" method on your own. 
- */ - class CachedHostWhiteboard { - /** - * Number of seconds till the cache expires - */ - private static final int CACHE_EXPIRE_TIME_MS = 10000; + public NestedGroup getJobWhiteboard(ShowInterface show) { - /** - * The host whiteboard we're caching - */ - private NestedHostSeq hostWhiteboard; + CachedJobWhiteboardMapper cachedMapper = jobCache.get(show.getShowId()); + if (cachedMapper != null) { + if (System.currentTimeMillis() - cachedMapper.time < CACHE_TIMEOUT) { + return cachedMapper.mapper.groups.get(cachedMapper.mapper.rootGroupID); + } + } - /** - * The time in which the cache expires. - */ - private long expireTime = 0l; + NestedJobWhiteboardMapper mapper = new NestedJobWhiteboardMapper(); + getJdbcTemplate().query( + GET_NESTED_GROUPS + " AND show.pk_show=? ORDER BY folder_level.int_level ASC", + mapper, show.getShowId()); - public void cache(List hostWhiteboard) { - this.hostWhiteboard = NestedHostSeq.newBuilder().addAllNestedHosts(hostWhiteboard).build(); - expireTime = System.currentTimeMillis() + CACHE_EXPIRE_TIME_MS; + mapper = updateConnections(mapper); + jobCache.put(show.getShowId(), new CachedJobWhiteboardMapper(mapper)); + return mapper.groups.get(mapper.rootGroupID); } - public NestedHostSeq get() { - return hostWhiteboard; - } + private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLException { + + NestedJob.Builder jobBuilder = NestedJob.newBuilder().setId(rs.getString("pk_job")) + .setLogDir(rs.getString("str_log_dir")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_cores")).setMinGpus(rs.getInt("int_min_cores")) + .setName(rs.getString("str_name")).setPriority(rs.getInt("int_priority")) + .setShot(rs.getString("str_shot")).setShow(rs.getString("str_show")) + .setOs(rs.getString("str_os")).setFacility(rs.getString("facility_name")) + .setGroup(rs.getString("group_name")) + .setState(JobState.valueOf(rs.getString("str_state"))) + .setUser(rs.getString("str_user")).setIsPaused(rs.getBoolean("b_paused")) + .setHasComment(rs.getBoolean("b_comment")).setAutoEat(rs.getBoolean("b_autoeat")) + .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) + .setStats(WhiteboardDaoJdbc.mapJobStats(rs)); + + int uid = rs.getInt("int_uid"); + if (!rs.wasNull()) { + jobBuilder.setUid(uid); + } - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; + Timestamp ts = rs.getTimestamp("ts_stopped"); + if (ts != null) { + jobBuilder.setStopTime((int) (ts.getTime() / 1000)); + } else { + jobBuilder.setStopTime(0); + } + return jobBuilder.build(); } - } - /** - * The CachedHostWhiteboard holds onto the result of the last host whiteboard query for about 10 - * seconds, returning the same result to all subsequent requests. 
- */ - private final CachedHostWhiteboard cachedHostWhiteboard = new CachedHostWhiteboard(); + private static final String GET_HOSTS = "SELECT " + "alloc.str_name AS alloc_name, " + + "host.pk_host, " + "host.str_name AS host_name, " + + "host_stat.str_state AS host_state, " + "host.b_nimby, " + "host_stat.ts_booted, " + + "host_stat.ts_ping, " + "host.int_cores, " + "host.int_cores_idle, " + + "host.int_gpus, " + "host.int_gpus_idle, " + "host.int_gpu_mem, " + + "host.int_gpu_mem_idle, " + "host.int_mem, " + "host.int_mem_idle, " + + "host.str_lock_state, " + "host.str_tags, " + "host.b_comment, " + + "host.int_thread_mode, " + "host_stat.str_os, " + "host_stat.int_mem_total, " + + "host_stat.int_mem_free, " + "host_stat.int_swap_total, " + + "host_stat.int_swap_free, " + "host_stat.int_mcp_total, " + "host_stat.int_mcp_free, " + + "host_stat.int_gpu_mem_total, " + "host_stat.int_gpu_mem_free, " + + "host_stat.int_load, " + "proc.pk_proc, " + "proc.int_cores_reserved AS proc_cores, " + + "proc.int_gpus_reserved AS proc_gpus, " + "proc.int_mem_reserved AS proc_memory, " + + "proc.int_mem_used AS used_memory, " + "proc.int_mem_max_used AS max_memory, " + + "proc.int_gpu_mem_reserved AS proc_gpu_memory, " + "proc.ts_ping, " + + "proc.ts_booked, " + "proc.ts_dispatched, " + "proc.b_unbooked, " + + "redirect.str_name AS str_redirect, " + "job.str_name AS job_name, " + + "job.str_log_dir, " + "show.str_name AS show_name, " + "frame.str_name AS frame_name " + + "FROM " + "alloc, " + "host_stat, " + "host " + "LEFT JOIN " + "proc " + "ON " + + "(proc.pk_host = host.pk_host) " + "LEFT JOIN " + "frame " + "ON " + + "(proc.pk_frame = frame.pk_frame) " + "LEFT JOIN " + "job " + "ON " + + "(proc.pk_job = job.pk_job) " + "LEFT JOIN " + "show " + "ON " + + "(proc.pk_show = show.pk_show) " + "LEFT JOIN " + "redirect " + "ON " + + "(proc.pk_proc = redirect.pk_proc) " + "WHERE " + "host.pk_alloc = alloc.pk_alloc " + + "AND " + "host.pk_host = host_stat.pk_host "; + + /** + * Caches a the host whiteboard. This class is not thread safe so you have to synchronize calls + * to the "cache" method on your own. + */ + class CachedHostWhiteboard { + + /** + * Number of seconds till the cache expires + */ + private static final int CACHE_EXPIRE_TIME_MS = 10000; + + /** + * The host whiteboard we're caching + */ + private NestedHostSeq hostWhiteboard; + + /** + * The time in which the cache expires. + */ + private long expireTime = 0l; + + public void cache(List hostWhiteboard) { + this.hostWhiteboard = + NestedHostSeq.newBuilder().addAllNestedHosts(hostWhiteboard).build(); + expireTime = System.currentTimeMillis() + CACHE_EXPIRE_TIME_MS; + } - public NestedHostSeq getHostWhiteboard() { + public NestedHostSeq get() { + return hostWhiteboard; + } - if (!cachedHostWhiteboard.isExpired()) { - return cachedHostWhiteboard.get(); + public boolean isExpired() { + return System.currentTimeMillis() > expireTime; + } } - /* - * Ensures only 1 thread is doing the query, other threads will wait and then return the result - * of the thead that actually did the query. + /** + * The CachedHostWhiteboard holds onto the result of the last host whiteboard query for about 10 + * seconds, returning the same result to all subsequent requests. 
*/ - synchronized (cachedHostWhiteboard) { + private final CachedHostWhiteboard cachedHostWhiteboard = new CachedHostWhiteboard(); - if (!cachedHostWhiteboard.isExpired()) { - return cachedHostWhiteboard.get(); - } - - final List result = new ArrayList(3000); - final Map hosts = new HashMap(3000); - final Map procs = new HashMap(8000); - - getJdbcTemplate().query(GET_HOSTS, new RowMapper() { - - public NestedHost mapRow(ResultSet rs, int row) throws SQLException { - NestedHost host; - String hid = rs.getString("pk_host"); - if (!hosts.containsKey(hid)) { - host = WhiteboardDaoJdbc.mapNestedHostBuilder(rs).build(); - hosts.put(hid, host); - result.add(host); - } else { - host = hosts.get(hid); - } - - String pid = rs.getString("pk_proc"); - if (pid != null) { - NestedProc proc; - if (!procs.containsKey(pid)) { - proc = NestedProc.newBuilder().setId(pid) - .setName(CueUtil.buildProcName(host.getName(), rs.getInt("proc_cores"), - rs.getInt("proc_gpus"))) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("proc_cores"))) - .setReservedGpus(rs.getInt("proc_gpus")) - .setReservedMemory(rs.getLong("proc_memory")) - .setReservedGpuMemory(rs.getLong("proc_gpu_memory")) - .setUsedMemory(rs.getLong("used_memory")).setFrameName(rs.getString("frame_name")) - .setJobName(rs.getString("job_name")).setShowName(rs.getString("show_name")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) - .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) - .setUnbooked(rs.getBoolean("b_unbooked")) - .setLogPath(String.format("%s/%s.%s.rqlog", rs.getString("str_log_dir"), - rs.getString("job_name"), rs.getString("frame_name"))) - .setRedirectTarget(rs.getString("str_redirect")).setParent(host).build(); - - host = host.toBuilder() - .setProcs(host.getProcs().toBuilder().addNestedProcs(proc).build()).build(); - procs.put(pid, proc); - } else { - proc = procs.get(pid); - } - } - return null; + public NestedHostSeq getHostWhiteboard() { + + if (!cachedHostWhiteboard.isExpired()) { + return cachedHostWhiteboard.get(); } - }); - cachedHostWhiteboard.cache(result); + /* + * Ensures only 1 thread is doing the query, other threads will wait and then return the + * result of the thead that actually did the query. 
+ */ + synchronized (cachedHostWhiteboard) { + + if (!cachedHostWhiteboard.isExpired()) { + return cachedHostWhiteboard.get(); + } + + final List result = new ArrayList(3000); + final Map hosts = new HashMap(3000); + final Map procs = new HashMap(8000); + + getJdbcTemplate().query(GET_HOSTS, new RowMapper() { + + public NestedHost mapRow(ResultSet rs, int row) throws SQLException { + NestedHost host; + String hid = rs.getString("pk_host"); + if (!hosts.containsKey(hid)) { + host = WhiteboardDaoJdbc.mapNestedHostBuilder(rs).build(); + hosts.put(hid, host); + result.add(host); + } else { + host = hosts.get(hid); + } + + String pid = rs.getString("pk_proc"); + if (pid != null) { + NestedProc proc; + if (!procs.containsKey(pid)) { + proc = NestedProc.newBuilder().setId(pid) + .setName(CueUtil.buildProcName(host.getName(), + rs.getInt("proc_cores"), rs.getInt("proc_gpus"))) + .setReservedCores( + Convert.coreUnitsToCores(rs.getInt("proc_cores"))) + .setReservedGpus(rs.getInt("proc_gpus")) + .setReservedMemory(rs.getLong("proc_memory")) + .setReservedGpuMemory(rs.getLong("proc_gpu_memory")) + .setUsedMemory(rs.getLong("used_memory")) + .setFrameName(rs.getString("frame_name")) + .setJobName(rs.getString("job_name")) + .setShowName(rs.getString("show_name")) + .setPingTime( + (int) (rs.getTimestamp("ts_ping").getTime() / 1000)) + .setBookedTime( + (int) (rs.getTimestamp("ts_booked").getTime() / 1000)) + .setDispatchTime( + (int) (rs.getTimestamp("ts_dispatched").getTime() + / 1000)) + .setUnbooked(rs.getBoolean("b_unbooked")) + .setLogPath(String.format("%s/%s.%s.rqlog", + rs.getString("str_log_dir"), rs.getString("job_name"), + rs.getString("frame_name"))) + .setRedirectTarget(rs.getString("str_redirect")).setParent(host) + .build(); + + host = host.toBuilder().setProcs( + host.getProcs().toBuilder().addNestedProcs(proc).build()) + .build(); + procs.put(pid, proc); + } else { + proc = procs.get(pid); + } + } + return null; + } + }); + + cachedHostWhiteboard.cache(result); + } + return cachedHostWhiteboard.get(); } - return cachedHostWhiteboard.get(); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java index d6a888362..1d5087510 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/OwnerDaoJdbc.java @@ -31,65 +31,67 @@ public class OwnerDaoJdbc extends JdbcDaoSupport implements OwnerDao { - public static final RowMapper OWNER_MAPPER = new RowMapper() { - public OwnerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - OwnerEntity o = new OwnerEntity(); - o.id = rs.getString("pk_owner"); - o.name = rs.getString("str_username"); - return o; + public static final RowMapper OWNER_MAPPER = new RowMapper() { + public OwnerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + OwnerEntity o = new OwnerEntity(); + o.id = rs.getString("pk_owner"); + o.name = rs.getString("str_username"); + return o; + } + }; + + @Override + public boolean deleteOwner(Entity owner) { + return getJdbcTemplate().update("DELETE FROM owner WHERE pk_owner = ?", owner.getId()) > 0; } - }; - - @Override - public boolean deleteOwner(Entity owner) { - return getJdbcTemplate().update("DELETE FROM owner WHERE pk_owner = ?", owner.getId()) > 0; - } - - private static final String QUERY_FOR_OWNER = - "SELECT " + "owner.pk_owner," + "owner.str_username " + "FROM " + "owner "; - - @Override - public OwnerEntity 
findOwner(String name) { - try { - return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " WHERE str_username = ?", - OWNER_MAPPER, name); - } catch (EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException("Failed to find owner: " + name, 1); + + private static final String QUERY_FOR_OWNER = + "SELECT " + "owner.pk_owner," + "owner.str_username " + "FROM " + "owner "; + + @Override + public OwnerEntity findOwner(String name) { + try { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " WHERE str_username = ?", + OWNER_MAPPER, name); + } catch (EmptyResultDataAccessException e) { + throw new EmptyResultDataAccessException("Failed to find owner: " + name, 1); + } + } + + @Override + public OwnerEntity getOwner(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " WHERE pk_owner = ?", + OWNER_MAPPER, id); + } + + @Override + public OwnerEntity getOwner(HostInterface host) { + return getJdbcTemplate() + .queryForObject( + QUERY_FOR_OWNER + "WHERE " + "pk_owner = (" + "SELECT " + "pk_owner " + + "FROM " + "deed " + "WHERE " + "pk_host = ?)", + OWNER_MAPPER, host.getHostId()); + } + + public boolean isOwner(OwnerEntity owner, HostInterface host) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM host, deed" + + " WHERE host.pk_host = deed.pk_host AND deed.pk_owner=?", + Integer.class, owner.getId()) > 0; + } + + private static final String INSERT_OWNER = "INSERT INTO " + "owner " + "(" + "pk_owner," + + "pk_show," + "str_username " + ") " + "VALUES (?,?,?)"; + + @Override + public void insertOwner(OwnerEntity owner, ShowInterface show) { + owner.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_OWNER, owner.id, show.getShowId(), owner.name); + } + + @Override + public void updateShow(Entity owner, ShowInterface show) { + getJdbcTemplate().update("UPDATE owner SET pk_show = ? WHERE pk_owner = ?", + show.getShowId(), owner.getId()); } - } - - @Override - public OwnerEntity getOwner(String id) { - return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " WHERE pk_owner = ?", OWNER_MAPPER, - id); - } - - @Override - public OwnerEntity getOwner(HostInterface host) { - return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + "WHERE " + "pk_owner = (" + "SELECT " - + "pk_owner " + "FROM " + "deed " + "WHERE " + "pk_host = ?)", OWNER_MAPPER, - host.getHostId()); - } - - public boolean isOwner(OwnerEntity owner, HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host, deed" - + " WHERE host.pk_host = deed.pk_host AND deed.pk_owner=?", - Integer.class, owner.getId()) > 0; - } - - private static final String INSERT_OWNER = "INSERT INTO " + "owner " + "(" + "pk_owner," - + "pk_show," + "str_username " + ") " + "VALUES (?,?,?)"; - - @Override - public void insertOwner(OwnerEntity owner, ShowInterface show) { - owner.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_OWNER, owner.id, show.getShowId(), owner.name); - } - - @Override - public void updateShow(Entity owner, ShowInterface show) { - getJdbcTemplate().update("UPDATE owner SET pk_show = ? 
WHERE pk_owner = ?", show.getShowId(), - owner.getId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java index d37b86cbc..0493c7448 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/PointDaoJdbc.java @@ -32,121 +32,125 @@ public class PointDaoJdbc extends JdbcDaoSupport implements PointDao { - @Override - public void insertPointConf(PointDetail t) { - t.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update("INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", t.id, - t.getShowId(), t.getDepartmentId()); - } - - @Override - public boolean isManaged(ShowInterface show, DepartmentInterface dept) { - try { - return getJdbcTemplate().queryForObject( - "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", Integer.class, - show.getShowId(), dept.getDepartmentId()) == 1; - } catch (org.springframework.dao.DataRetrievalFailureException e) { - return false; + @Override + public void insertPointConf(PointDetail t) { + t.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update("INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", + t.id, t.getShowId(), t.getDepartmentId()); } - } - - @Override - public PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept) { - PointDetail r = new PointDetail(); - r.deptId = dept.getId(); - r.showId = show.getShowId(); - r.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update("INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", r.id, - r.getShowId(), r.getDepartmentId()); - return r; - } - - @Override - public boolean pointConfExists(ShowInterface show, DepartmentInterface dept) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM point WHERE pk_show=? AND pk_dept=?", Integer.class, show.getShowId(), - dept.getDepartmentId()) > 0; - } - - private static final String UPDATE_TI_MANAGED = "UPDATE " + "point " + "SET " - + "b_managed = true," + "str_ti_task=?, " + "int_min_cores=? " + "WHERE " + "pk_point=?"; - - @Override - public void updateEnableManaged(PointInterface p, String task, int coreUnits) { - getJdbcTemplate().update(UPDATE_TI_MANAGED, task, coreUnits, p.getPointId()); - } - - private static final String UPDATE_DISABLE_TI_MANAGED = "UPDATE " + "point " + "SET " - + "b_managed = false," + "str_ti_task=null, " + "int_min_cores=0 " + "WHERE " + "pk_point=?"; - - @Override - public void updateDisableManaged(PointInterface p) { - getJdbcTemplate().update(UPDATE_DISABLE_TI_MANAGED, p.getPointId()); - } - - private static final RowMapper DEPARTMENT_CONFIG_DETAIL_MAPPER = - new RowMapper() { - public PointDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - PointDetail rpd = new PointDetail(); - rpd.deptId = rs.getString("pk_dept"); - rpd.showId = rs.getString("pk_show"); - rpd.id = rs.getString("pk_point"); - rpd.cores = rs.getInt("int_min_cores"); - rpd.tiTask = rs.getString("str_ti_task"); - return rpd; + + @Override + public boolean isManaged(ShowInterface show, DepartmentInterface dept) { + try { + return getJdbcTemplate().queryForObject( + "SELECT b_managed FROM point WHERE pk_show=? 
and pk_dept=?", Integer.class, + show.getShowId(), dept.getDepartmentId()) == 1; + } catch (org.springframework.dao.DataRetrievalFailureException e) { + return false; } - }; - - private static final String GET_DEPARTMENT_CONFIG_DETAIL = - "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores " - + "FROM " + "point " + "WHERE " + "pk_point = ?"; - - @Override - public PointDetail getPointConfDetail(String id) { - return getJdbcTemplate().queryForObject(GET_DEPARTMENT_CONFIG_DETAIL, - DEPARTMENT_CONFIG_DETAIL_MAPPER, id); - } - - private static final String GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT = - "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores, " - + "b_managed " + "FROM " + "point " + "WHERE " + "pk_show = ? " + "AND " + "pk_dept = ? "; - - @Override - public PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept) { - return getJdbcTemplate().queryForObject(GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT, - DEPARTMENT_CONFIG_DETAIL_MAPPER, show.getShowId(), dept.getDepartmentId()); - } - - private static final String UPDATE_TI_MANAGED_CORES = - "UPDATE " + "point " + "SET " + "int_min_cores=? " + "WHERE " + "pk_point=?"; - - @Override - public void updateManagedCores(PointInterface cdept, int cores) { - getJdbcTemplate().update(UPDATE_TI_MANAGED_CORES, cores, cdept.getPointId()); - - } - - private static final String GET_MANAGED_POINT_CONFS = - "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores, " - + "b_managed " + "FROM " + "point " + "WHERE " + "b_managed = true "; - - @Override - public List getManagedPointConfs() { - return getJdbcTemplate().query(GET_MANAGED_POINT_CONFS, DEPARTMENT_CONFIG_DETAIL_MAPPER); - } - - @Override - public void updatePointConfUpdateTime(PointInterface t) { - getJdbcTemplate().update("UPDATE point SET ts_updated=current_timestamp WHERE pk_point=?", - t.getPointId()); - } - - private static final String IS_OVER_MIN_CORES = "SELECT " + "COUNT(1) " + "FROM " + "job," - + "point p " + "WHERE " + "job.pk_show = p.pk_show " + "AND " + "job.pk_dept = p.pk_dept " - + "AND " + "p.int_cores > p.int_min_cores " + "AND " + "job.pk_job = ?"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, Integer.class, job.getJobId()) > 0; - } + } + + @Override + public PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept) { + PointDetail r = new PointDetail(); + r.deptId = dept.getId(); + r.showId = show.getShowId(); + r.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update("INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", + r.id, r.getShowId(), r.getDepartmentId()); + return r; + } + + @Override + public boolean pointConfExists(ShowInterface show, DepartmentInterface dept) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM point WHERE pk_show=? AND pk_dept=?", Integer.class, + show.getShowId(), dept.getDepartmentId()) > 0; + } + + private static final String UPDATE_TI_MANAGED = + "UPDATE " + "point " + "SET " + "b_managed = true," + "str_ti_task=?, " + + "int_min_cores=? 
" + "WHERE " + "pk_point=?"; + + @Override + public void updateEnableManaged(PointInterface p, String task, int coreUnits) { + getJdbcTemplate().update(UPDATE_TI_MANAGED, task, coreUnits, p.getPointId()); + } + + private static final String UPDATE_DISABLE_TI_MANAGED = + "UPDATE " + "point " + "SET " + "b_managed = false," + "str_ti_task=null, " + + "int_min_cores=0 " + "WHERE " + "pk_point=?"; + + @Override + public void updateDisableManaged(PointInterface p) { + getJdbcTemplate().update(UPDATE_DISABLE_TI_MANAGED, p.getPointId()); + } + + private static final RowMapper DEPARTMENT_CONFIG_DETAIL_MAPPER = + new RowMapper() { + public PointDetail mapRow(ResultSet rs, int rowNum) throws SQLException { + PointDetail rpd = new PointDetail(); + rpd.deptId = rs.getString("pk_dept"); + rpd.showId = rs.getString("pk_show"); + rpd.id = rs.getString("pk_point"); + rpd.cores = rs.getInt("int_min_cores"); + rpd.tiTask = rs.getString("str_ti_task"); + return rpd; + } + }; + + private static final String GET_DEPARTMENT_CONFIG_DETAIL = + "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores " + + "FROM " + "point " + "WHERE " + "pk_point = ?"; + + @Override + public PointDetail getPointConfDetail(String id) { + return getJdbcTemplate().queryForObject(GET_DEPARTMENT_CONFIG_DETAIL, + DEPARTMENT_CONFIG_DETAIL_MAPPER, id); + } + + private static final String GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT = "SELECT " + "pk_point," + + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores, " + "b_managed " + "FROM " + + "point " + "WHERE " + "pk_show = ? " + "AND " + "pk_dept = ? "; + + @Override + public PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept) { + return getJdbcTemplate().queryForObject(GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT, + DEPARTMENT_CONFIG_DETAIL_MAPPER, show.getShowId(), dept.getDepartmentId()); + } + + private static final String UPDATE_TI_MANAGED_CORES = + "UPDATE " + "point " + "SET " + "int_min_cores=? 
" + "WHERE " + "pk_point=?"; + + @Override + public void updateManagedCores(PointInterface cdept, int cores) { + getJdbcTemplate().update(UPDATE_TI_MANAGED_CORES, cores, cdept.getPointId()); + + } + + private static final String GET_MANAGED_POINT_CONFS = + "SELECT " + "pk_point," + "pk_dept," + "pk_show," + "str_ti_task," + "int_min_cores, " + + "b_managed " + "FROM " + "point " + "WHERE " + "b_managed = true "; + + @Override + public List getManagedPointConfs() { + return getJdbcTemplate().query(GET_MANAGED_POINT_CONFS, DEPARTMENT_CONFIG_DETAIL_MAPPER); + } + + @Override + public void updatePointConfUpdateTime(PointInterface t) { + getJdbcTemplate().update("UPDATE point SET ts_updated=current_timestamp WHERE pk_point=?", + t.getPointId()); + } + + private static final String IS_OVER_MIN_CORES = + "SELECT " + "COUNT(1) " + "FROM " + "job," + "point p " + "WHERE " + + "job.pk_show = p.pk_show " + "AND " + "job.pk_dept = p.pk_dept " + "AND " + + "p.int_cores > p.int_min_cores " + "AND " + "job.pk_job = ?"; + + @Override + public boolean isOverMinCores(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, Integer.class, + job.getJobId()) > 0; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java index 364791313..a35f75c7b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java @@ -49,591 +49,616 @@ public class ProcDaoJdbc extends JdbcDaoSupport implements ProcDao { - @Autowired - private Environment env; - - private static final String VERIFY_RUNNING_PROC = "SELECT " + "proc.pk_frame " + "FROM " - + "proc, " + "job " + "WHERE " + "proc.pk_job = job.pk_job " + "AND " - + "job.str_state = 'PENDING' " + "AND " + "proc.pk_proc= ? "; - - public boolean verifyRunningProc(String procId, String frameId) { - try { - String pk_frame = getJdbcTemplate().queryForObject(VERIFY_RUNNING_PROC, String.class, procId); - if (pk_frame != null) { - return pk_frame.equals(frameId); - } else { + @Autowired + private Environment env; + + private static final String VERIFY_RUNNING_PROC = "SELECT " + "proc.pk_frame " + "FROM " + + "proc, " + "job " + "WHERE " + "proc.pk_job = job.pk_job " + "AND " + + "job.str_state = 'PENDING' " + "AND " + "proc.pk_proc= ? "; + + public boolean verifyRunningProc(String procId, String frameId) { + try { + String pk_frame = + getJdbcTemplate().queryForObject(VERIFY_RUNNING_PROC, String.class, procId); + if (pk_frame != null) { + return pk_frame.equals(frameId); + } else { + return false; + } + } catch (org.springframework.dao.EmptyResultDataAccessException e) { + // EAT + } return false; - } - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - // EAT - } - return false; - } - - private static final String DELETE_VIRTUAL_PROC = - "DELETE FROM " + "proc " + "WHERE " + "pk_proc=?"; - - public boolean deleteVirtualProc(VirtualProc proc) { - if (getJdbcTemplate().update(DELETE_VIRTUAL_PROC, proc.getProcId()) == 0) { - logger.info("failed to delete " + proc + " , proc does not exist."); - return false; - } - // update all of the resource counts. 
- procDestroyed(proc); - return true; - } - - private static final String INSERT_VIRTUAL_PROC = "INSERT INTO " + "proc " + "( " + "pk_proc, " - + "pk_host, " + "pk_show, " + "pk_layer," + "pk_job," + "pk_frame, " + "int_cores_reserved, " - + "int_mem_reserved, " + "int_mem_pre_reserved, " + "int_mem_used, " + "int_gpus_reserved, " - + "int_gpu_mem_reserved, " + "int_gpu_mem_pre_reserved, " + "int_gpu_mem_used, " + "b_local " - + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) "; - - public void insertVirtualProc(VirtualProc proc) { - proc.id = SqlUtil.genKeyRandom(); - long memReservedMin = env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - long memGpuReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); - int result = 0; - try { - result = getJdbcTemplate().update(INSERT_VIRTUAL_PROC, proc.getProcId(), proc.getHostId(), - proc.getShowId(), proc.getLayerId(), proc.getJobId(), proc.getFrameId(), - proc.coresReserved, proc.memoryReserved, proc.memoryReserved, memReservedMin, - proc.gpusReserved, proc.gpuMemoryReserved, proc.gpuMemoryReserved, memGpuReservedMin, - proc.isLocalDispatch); - - // Update all of the resource counts - procCreated(proc); - } catch (org.springframework.dao.DataIntegrityViolationException d) { - /* - * This means the frame is already running. If that is the case, don't delete it, just set - * pk_frame to null or the orphaned proc handler will catch it. - */ - throw new ResourceDuplicationFailureException( - "The frame " + proc.getFrameId() + " is already assigned to a proc."); - } catch (Exception e) { - String msg = - "unable to book proc " + proc.getName() + " on frame " + proc.getFrameId() + " , " + e; - throw new ResourceReservationFailureException(msg, e); - } - - if (result == 0) { - String msg = "unable to book proc " + proc.id + " the insert query succeeded but returned 0"; - throw new ResourceReservationFailureException(msg); - } - } - - private static final String UPDATE_VIRTUAL_PROC_ASSIGN = "UPDATE " + "proc " + "SET " - + "pk_show = ?, " + "pk_job = ?, " + "pk_layer = ?, " + "pk_frame = ?, " - + "int_mem_used = 0, " + "int_mem_max_used = 0, " + "int_virt_used = 0, " - + "int_virt_max_used = 0, " + "ts_dispatched = current_timestamp " + "WHERE " + "pk_proc = ?"; - - public void updateVirtualProcAssignment(VirtualProc proc) { - - int result = 0; - try { - result = getJdbcTemplate().update(UPDATE_VIRTUAL_PROC_ASSIGN, proc.getShowId(), - proc.getJobId(), proc.getLayerId(), proc.getFrameId(), proc.getProcId()); - } catch (org.springframework.dao.DataIntegrityViolationException d) { - throw new ResourceDuplicationFailureException( - "The frame " + proc.getFrameId() + " is already assigned to " + "the proc " + proc); - } catch (Exception e) { - String msg = "unable to book proc " + proc.id + ", " + e; - throw new ResourceReservationFailureException(msg, e); - } - - /* - * If the proc was not updated then it has disappeared. 
- */ - if (result == 0) { - String msg = "unable to book proc " + proc.id + ", the proc no longer exists,"; - throw new ResourceReservationFailureException(msg); - } - } - - private static final String CLEAR_VIRTUAL_PROC_ASSIGN = - "UPDATE " + "proc " + "SET " + "pk_frame = NULL " + "WHERE " + "pk_proc = ?"; - - public boolean clearVirtualProcAssignment(ProcInterface proc) { - return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN, proc.getId()) == 1; - } - - private static final String CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME = - "UPDATE " + "proc " + "SET " + "pk_frame = NULL " + "WHERE " + "pk_frame = ?"; - - public boolean clearVirtualProcAssignment(FrameInterface frame) { - return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME, frame.getFrameId()) == 1; - } - - private static final String UPDATE_PROC_MEMORY_USAGE = - "UPDATE " + "proc " + "SET " + "int_mem_used = ?, " + "int_mem_max_used = ?," - + "int_virt_used = ?, " + "int_virt_max_used = ?, " + "int_gpu_mem_used = ?, " - + "int_gpu_mem_max_used = ?, " + "int_swap_used = ?, " + "bytea_children = ?, " - + "ts_ping = current_timestamp " + "WHERE " + "pk_frame = ?"; - - @Override - public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, long vss, long maxVss, - long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children) { - /* - * This method is going to repeat for a proc every 1 minute, so if the proc is being touched by - * another thread, then return quietly without updating memory usage. - * - * If another thread is accessing the proc record, that means the proc is probably being booked - * to another frame, which makes this update invalid anyway. - */ - try { - if (getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_frame=? FOR UPDATE", - String.class, f.getFrameId()).equals(f.getFrameId())) { - - getJdbcTemplate().update(new PreparedStatementCreator() { - @Override - public PreparedStatement createPreparedStatement(Connection conn) throws SQLException { - PreparedStatement updateProc = conn.prepareStatement(UPDATE_PROC_MEMORY_USAGE); - updateProc.setLong(1, rss); - updateProc.setLong(2, maxRss); - updateProc.setLong(3, vss); - updateProc.setLong(4, maxVss); - updateProc.setLong(5, usedGpuMemory); - updateProc.setLong(6, maxUsedGpuMemory); - updateProc.setLong(7, usedSwapMemory); - updateProc.setBytes(8, children); - updateProc.setString(9, f.getFrameId()); - return updateProc; - } - }); - } - } catch (DataAccessException dae) { - logger - .info("The proc for frame " + f + " could not be updated with new memory stats: " + dae); - } - } - - /** - * Maps a row to a VirtualProc object. 
- */ - public static final RowMapper VIRTUAL_PROC_MAPPER = new RowMapper() { - public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { - VirtualProc proc = new VirtualProc(); - proc.id = rs.getString("pk_proc"); - proc.hostId = rs.getString("pk_host"); - proc.showId = rs.getString("pk_show"); - proc.jobId = rs.getString("pk_job"); - proc.layerId = rs.getString("pk_layer"); - proc.frameId = rs.getString("pk_frame"); - proc.hostName = rs.getString("host_name"); - proc.allocationId = rs.getString("pk_alloc"); - proc.facilityId = rs.getString("pk_facility"); - proc.coresReserved = rs.getInt("int_cores_reserved"); - proc.memoryReserved = rs.getLong("int_mem_reserved"); - proc.memoryMax = rs.getLong("int_mem_max_used"); - proc.gpusReserved = rs.getInt("int_gpus_reserved"); - proc.gpuMemoryReserved = rs.getLong("int_gpu_mem_reserved"); - proc.gpuMemoryMax = rs.getLong("int_gpu_mem_max_used"); - proc.virtualMemoryMax = rs.getLong("int_virt_max_used"); - proc.virtualMemoryUsed = rs.getLong("int_virt_used"); - proc.memoryUsed = rs.getLong("int_mem_used"); - proc.unbooked = rs.getBoolean("b_unbooked"); - proc.isLocalDispatch = rs.getBoolean("b_local"); - proc.os = rs.getString("str_os"); - proc.childProcesses = rs.getBytes("bytea_children"); - return proc; - } - }; - - private static final String GET_VIRTUAL_PROC = "SELECT " + "proc.pk_proc," + "proc.pk_host," - + "proc.pk_show," + "proc.pk_job," + "proc.pk_layer," + "proc.pk_frame," + "proc.b_unbooked," - + "proc.b_local," + "host.pk_alloc, " + "alloc.pk_facility," + "proc.int_cores_reserved," - + "proc.int_mem_reserved," + "proc.int_mem_max_used," + "proc.int_mem_used," - + "proc.int_gpus_reserved," + "proc.int_gpu_mem_reserved," + "proc.int_gpu_mem_max_used," - + "proc.int_gpu_mem_used," + "proc.bytea_children," + "proc.int_virt_max_used," - + "proc.int_virt_used," + "host.str_name AS host_name, " - + "COALESCE(job.str_os, '') AS str_os " + "FROM " + "proc, " + "job, " + "host, " - + "host_stat, " + "alloc " + "WHERE " + "proc.pk_host = host.pk_host " + "AND " - + "host.pk_host = host_stat.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " + "AND " - + "job.pk_job = proc.pk_job "; - - public VirtualProc getVirtualProc(String id) { - return getJdbcTemplate().queryForObject(GET_VIRTUAL_PROC + " AND proc.pk_proc=? ", - VIRTUAL_PROC_MAPPER, id); - } - - public VirtualProc findVirtualProc(FrameInterface frame) { - return getJdbcTemplate().queryForObject(GET_VIRTUAL_PROC + " AND proc.pk_frame=? 
", - VIRTUAL_PROC_MAPPER, frame.getFrameId()); - } - - private static final String GET_VIRTUAL_PROC_LIST = "SELECT " + "proc.*, " - + "host.str_name AS host_name, " + "host.pk_alloc, " + "COALESCE(job.str_os, '') AS str_os, " - + "alloc.pk_facility " + "FROM " + "proc, " + "frame, " + "host," + "host_stat, " + "alloc, " - + "layer," + "job, " + "folder, " + "show " + "WHERE " + "proc.pk_show = show.pk_show " - + "AND " + "proc.pk_host = host.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " - + "AND " + "host.pk_host = host_stat.pk_host " + "AND " + "proc.pk_job = job.pk_job " + "AND " - + "proc.pk_layer = layer.pk_layer " + "AND " + "proc.pk_frame = frame.pk_frame " + "AND " - + "job.pk_folder = folder.pk_folder "; - - public List findVirtualProcs(ProcSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), VIRTUAL_PROC_MAPPER, - r.getValuesArray()); - } - - @Override - public List findBookedVirtualProcs(ProcSearchInterface r) { - return getJdbcTemplate().query( - r.getFilteredQuery(GET_VIRTUAL_PROC_LIST + "AND proc.b_unbooked = false"), - VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - public List findVirtualProcs(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), VIRTUAL_PROC_MAPPER, - r.getValuesArray()); - } - - public List findVirtualProcs(HostInterface host) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_host=?", - VIRTUAL_PROC_MAPPER, host.getHostId()); - } - - public List findVirtualProcs(LayerInterface layer) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_layer=?", - VIRTUAL_PROC_MAPPER, layer.getLayerId()); - } - - public List findVirtualProcs(JobInterface job) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_job=?", - VIRTUAL_PROC_MAPPER, job.getJobId()); - } - - private static final String FIND_VIRTUAL_PROCS_LJA = GET_VIRTUAL_PROC_LIST + "AND proc.pk_job=( " - + "SELECT pk_job FROM host_local WHERE pk_host_local = ?) " + "AND proc.pk_host=(" - + "SELECT pk_host FROM host_local WHERE pk_host_local = ?) "; - - @Override - public List findVirtualProcs(LocalHostAssignment l) { - return getJdbcTemplate().query(FIND_VIRTUAL_PROCS_LJA, VIRTUAL_PROC_MAPPER, l.getId(), - l.getId()); - } - - public List findVirtualProcs(HardwareState state) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND host_stat.str_state=?", - VIRTUAL_PROC_MAPPER, state.toString()); - } - - public void unbookVirtualProcs(List procs) { - List batchArgs = new ArrayList(procs.size()); - for (VirtualProc proc : procs) { - batchArgs.add(new Object[] {proc.id}); - } - - getJdbcTemplate().batchUpdate("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", batchArgs); - } - - @Override - public boolean setUnbookState(ProcInterface proc, boolean unbooked) { - return getJdbcTemplate().update("UPDATE proc SET b_unbooked=? WHERE pk_proc=?", unbooked, - proc.getProcId()) == 1; - } - - @Override - public boolean setRedirectTarget(ProcInterface p, Redirect r) { - String name = null; - boolean unbooked = false; - if (r != null) { - name = r.getDestinationName(); - unbooked = true; - } - return getJdbcTemplate().update("UPDATE proc SET str_redirect=?, b_unbooked=? 
WHERE pk_proc=?", - name, unbooked, p.getProcId()) == 1; - } - - public void unbookProc(ProcInterface proc) { - getJdbcTemplate().update("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", proc.getProcId()); - } - - public String getCurrentShowId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_show FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - public String getCurrentJobId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_job FROM proc WHERE pk_proc=?", String.class, - p.getProcId()); - } - - public String getCurrentLayerId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_layer FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - public String getCurrentFrameId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - private static final String ORPHANED_PROC_INTERVAL = "interval '300' second"; - private static final String GET_ORPHANED_PROC_LIST = "SELECT " + "proc.*, " - + "host.str_name AS host_name, " + "COALESCE(job.str_os, '') AS str_os, " + "host.pk_alloc, " - + "alloc.pk_facility " + "FROM " + "proc, " + "host, " + "host_stat," + "alloc, " + "job " - + "WHERE " + "proc.pk_host = host.pk_host " + "AND " + "host.pk_host = host_stat.pk_host " - + "AND " + "host.pk_alloc = alloc.pk_alloc " + "AND " + "job.pk_job = proc.pk_job " + "AND " - + "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; - - public List findOrphanedVirtualProcs() { - return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST, VIRTUAL_PROC_MAPPER); - } - - public List findOrphanedVirtualProcs(int limit) { - return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST + " LIMIT " + limit, VIRTUAL_PROC_MAPPER); - } - - private static final String IS_ORPHAN = - "SELECT " + "COUNT(1) " + "FROM " + "proc " + "WHERE " + "proc.pk_proc = ? " + "AND " - + "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; - - @Override - public boolean isOrphan(ProcInterface proc) { - return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, proc.getProcId()) == 1; - } - - public boolean increaseReservedMemory(ProcInterface p, long value) { - try { - return getJdbcTemplate().update( - "UPDATE proc SET int_mem_reserved=? WHERE pk_proc=? AND int_mem_reserved < ?", value, - p.getProcId(), value) == 1; - } catch (Exception e) { - // check by trigger erify_host_resources - throw new ResourceReservationFailureException( - "failed to increase memory reservation for proc " + p.getProcId() + " to " + value - + ", proc does not have that much memory to spare."); - } - } - - public long getReservedMemory(ProcInterface proc) { - return getJdbcTemplate().queryForObject("SELECT int_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, proc.getProcId()); - } - - public long getReservedGpuMemory(ProcInterface proc) { - return getJdbcTemplate().queryForObject("SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, proc.getProcId()); - } - - private static final String FIND_UNDERUTILIZED_PROCS = - "SELECT " + "proc.pk_proc," + "proc.int_mem_reserved - layer_mem.int_max_rss AS free_mem " - + "FROM " + "proc," + "host, " + "layer_mem " + "WHERE " + "proc.pk_host = host.pk_host " - + "AND " + "proc.pk_layer = layer_mem.pk_layer " + "AND " + "layer_mem.int_max_rss > 0 " - + "AND " + "host.pk_host = ? " + "AND " + "proc.pk_proc != ? 
" + "AND " - + "proc.int_mem_reserved - layer_mem.int_max_rss > 0"; - - public boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem) { - - List> result = getJdbcTemplate().queryForList(FIND_UNDERUTILIZED_PROCS, - targetProc.getHostId(), targetProc.getProcId()); - - if (result.size() == 0) { - logger.info("unable to find under utilized procs on host " + targetProc.getName()); - return false; - } - - final Map borrowMap = new HashMap(result.size()); - for (Map map : result) { - logger.info("creating borrow map for: " + (String) map.get("pk_proc")); - borrowMap.put((String) map.get("pk_proc"), 0l); - } - - long memBorrowedTotal = 0l; - int pass = 0; - int maxPasses = 3; - - while (true) { - // the amount of memory we're going to borrow per frame/proc - long memPerFrame = ((targetMem - memBorrowedTotal) / result.size()) + 1; - - // loop through all of our other running frames and try to borrow - // a little bit of memory from each one. - for (Map map : result) { - String pk_proc = (String) map.get("pk_proc"); - Long free_mem = (Long) map.get("free_mem"); + } + + private static final String DELETE_VIRTUAL_PROC = + "DELETE FROM " + "proc " + "WHERE " + "pk_proc=?"; + + public boolean deleteVirtualProc(VirtualProc proc) { + if (getJdbcTemplate().update(DELETE_VIRTUAL_PROC, proc.getProcId()) == 0) { + logger.info("failed to delete " + proc + " , proc does not exist."); + return false; + } + // update all of the resource counts. + procDestroyed(proc); + return true; + } + + private static final String INSERT_VIRTUAL_PROC = + "INSERT INTO " + "proc " + "( " + "pk_proc, " + "pk_host, " + "pk_show, " + "pk_layer," + + "pk_job," + "pk_frame, " + "int_cores_reserved, " + "int_mem_reserved, " + + "int_mem_pre_reserved, " + "int_mem_used, " + "int_gpus_reserved, " + + "int_gpu_mem_reserved, " + "int_gpu_mem_pre_reserved, " + "int_gpu_mem_used, " + + "b_local " + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) "; + + public void insertVirtualProc(VirtualProc proc) { + proc.id = SqlUtil.genKeyRandom(); long memReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - long available = free_mem - borrowMap.get(pk_proc) - memReservedMin; - if (available > memPerFrame) { - borrowMap.put(pk_proc, borrowMap.get(pk_proc) + memPerFrame); - memBorrowedTotal = memBorrowedTotal + memPerFrame; + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + int result = 0; + try { + result = getJdbcTemplate().update(INSERT_VIRTUAL_PROC, proc.getProcId(), + proc.getHostId(), proc.getShowId(), proc.getLayerId(), proc.getJobId(), + proc.getFrameId(), proc.coresReserved, proc.memoryReserved, proc.memoryReserved, + memReservedMin, proc.gpusReserved, proc.gpuMemoryReserved, + proc.gpuMemoryReserved, memGpuReservedMin, proc.isLocalDispatch); + + // Update all of the resource counts + procCreated(proc); + } catch (org.springframework.dao.DataIntegrityViolationException d) { + /* + * This means the frame is already running. If that is the case, don't delete it, just + * set pk_frame to null or the orphaned proc handler will catch it. 
+ */ + throw new ResourceDuplicationFailureException( + "The frame " + proc.getFrameId() + " is already assigned to a proc."); + } catch (Exception e) { + String msg = "unable to book proc " + proc.getName() + " on frame " + proc.getFrameId() + + " , " + e; + throw new ResourceReservationFailureException(msg, e); } - } - pass++; - - // If we were unable to borrow anything, just break - if (memBorrowedTotal == 0) { - break; - } - // If we got the memory we needed, break - if (memBorrowedTotal >= targetMem) { - break; - } - // If we've exceeded the number of tries in this loop, break - if (pass >= maxPasses) { - break; - } - } - - logger.info("attempted to borrow " + targetMem + " for host " + targetProc.getName() - + ", obtained " + memBorrowedTotal); - - if (memBorrowedTotal < targetMem) { - logger.info( - "mem borrowed " + memBorrowedTotal + " was less than the target memory of " + targetMem); - return false; - } - - /* - * This might fail... I'm not really sure if we should fail the whole operation or what. Just - * gonna let it ride for now. - */ - for (Map.Entry set : borrowMap.entrySet()) { - int success = getJdbcTemplate().update( - "UPDATE proc SET int_mem_reserved = int_mem_reserved - ? WHERE pk_proc=?", set.getValue(), - set.getKey()); - logger.info("transfering " + (set.getValue() * success) + " from " + set.getKey()); - } - - return true; - } - - public void updateReservedMemory(ProcInterface p, long value) { - getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=?", value, - p.getProcId()); - } - - /** - * Updates proc counts for the host, subscription, layer, job, folder, and proc point when a proc - * is destroyed. - * - * @param proc - */ - private void procDestroyed(VirtualProc proc) { - - getJdbcTemplate().update( - "UPDATE " + "host " + "SET " + "int_cores_idle = int_cores_idle + ?," - + "int_mem_idle = int_mem_idle + ?, " + "int_gpus_idle = int_gpus_idle + ?," - + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.getHostId()); - - if (!proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + "subscription " + "SET " + "int_cores = int_cores - ?," - + "int_gpus = int_gpus - ? " + "WHERE " + "pk_show = ? " + "AND " + "pk_alloc = ?", - proc.coresReserved, proc.gpusReserved, proc.getShowId(), proc.getAllocationId()); - } - - getJdbcTemplate().update( - "UPDATE " + "layer_resource " + "SET " + "int_cores = int_cores - ?," - + "int_gpus = int_gpus - ? " + "WHERE " + "pk_layer = ?", - proc.coresReserved, proc.gpusReserved, proc.getLayerId()); - - if (!proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + "job_resource " + "SET " + "int_cores = int_cores - ?," - + "int_gpus = int_gpus - ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + "folder_resource " + "SET " + "int_cores = int_cores - ?," - + "int_gpus = int_gpus - ? " + "WHERE " + "pk_folder = " - + "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + "point " + "SET " + "int_cores = int_cores - ?, " + "int_gpus = int_gpus - ? " - + "WHERE " + "pk_dept = " + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " - + "pk_show = " + "(SELECT pk_show FROM job WHERE pk_job=?) 
", - proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); - } - - if (proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + "job_resource " + "SET " + "int_local_cores = int_local_cores - ?, " - + "int_local_gpus = int_local_gpus - ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle + ?, " - + "int_mem_idle = int_mem_idle + ?, " + "int_gpus_idle = int_gpus_idle + ?, " - + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_job = ? " + "AND " - + "pk_host = ? ", - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.getJobId(), proc.getHostId()); - } - } - - /** - * Updates proc counts for the host, subscription, layer, job, folder, and proc point when a new - * proc is created. - * - * @param proc - */ - private void procCreated(VirtualProc proc) { - - getJdbcTemplate().update( - "UPDATE " + "host " + "SET " + "int_cores_idle = int_cores_idle - ?," - + "int_mem_idle = int_mem_idle - ?, " + "int_gpus_idle = int_gpus_idle - ?," - + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.getHostId()); + + if (result == 0) { + String msg = + "unable to book proc " + proc.id + " the insert query succeeded but returned 0"; + throw new ResourceReservationFailureException(msg); + } + } + + private static final String UPDATE_VIRTUAL_PROC_ASSIGN = + "UPDATE " + "proc " + "SET " + "pk_show = ?, " + "pk_job = ?, " + "pk_layer = ?, " + + "pk_frame = ?, " + "int_mem_used = 0, " + "int_mem_max_used = 0, " + + "int_virt_used = 0, " + "int_virt_max_used = 0, " + + "ts_dispatched = current_timestamp " + "WHERE " + "pk_proc = ?"; + + public void updateVirtualProcAssignment(VirtualProc proc) { + + int result = 0; + try { + result = getJdbcTemplate().update(UPDATE_VIRTUAL_PROC_ASSIGN, proc.getShowId(), + proc.getJobId(), proc.getLayerId(), proc.getFrameId(), proc.getProcId()); + } catch (org.springframework.dao.DataIntegrityViolationException d) { + throw new ResourceDuplicationFailureException("The frame " + proc.getFrameId() + + " is already assigned to " + "the proc " + proc); + } catch (Exception e) { + String msg = "unable to book proc " + proc.id + ", " + e; + throw new ResourceReservationFailureException(msg, e); + } + + /* + * If the proc was not updated then it has disappeared. 
+ */ + if (result == 0) { + String msg = "unable to book proc " + proc.id + ", the proc no longer exists,"; + throw new ResourceReservationFailureException(msg); + } + } + + private static final String CLEAR_VIRTUAL_PROC_ASSIGN = + "UPDATE " + "proc " + "SET " + "pk_frame = NULL " + "WHERE " + "pk_proc = ?"; + + public boolean clearVirtualProcAssignment(ProcInterface proc) { + return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN, proc.getId()) == 1; + } + + private static final String CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME = + "UPDATE " + "proc " + "SET " + "pk_frame = NULL " + "WHERE " + "pk_frame = ?"; + + public boolean clearVirtualProcAssignment(FrameInterface frame) { + return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME, + frame.getFrameId()) == 1; + } + + private static final String UPDATE_PROC_MEMORY_USAGE = + "UPDATE " + "proc " + "SET " + "int_mem_used = ?, " + "int_mem_max_used = ?," + + "int_virt_used = ?, " + "int_virt_max_used = ?, " + "int_gpu_mem_used = ?, " + + "int_gpu_mem_max_used = ?, " + "int_swap_used = ?, " + "bytea_children = ?, " + + "ts_ping = current_timestamp " + "WHERE " + "pk_frame = ?"; + + @Override + public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, long vss, + long maxVss, long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, + byte[] children) { + /* + * This method is going to repeat for a proc every 1 minute, so if the proc is being touched + * by another thread, then return quietly without updating memory usage. + * + * If another thread is accessing the proc record, that means the proc is probably being + * booked to another frame, which makes this update invalid anyway. + */ + try { + if (getJdbcTemplate() + .queryForObject("SELECT pk_frame FROM proc WHERE pk_frame=? FOR UPDATE", + String.class, f.getFrameId()) + .equals(f.getFrameId())) { + + getJdbcTemplate().update(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) + throws SQLException { + PreparedStatement updateProc = + conn.prepareStatement(UPDATE_PROC_MEMORY_USAGE); + updateProc.setLong(1, rss); + updateProc.setLong(2, maxRss); + updateProc.setLong(3, vss); + updateProc.setLong(4, maxVss); + updateProc.setLong(5, usedGpuMemory); + updateProc.setLong(6, maxUsedGpuMemory); + updateProc.setLong(7, usedSwapMemory); + updateProc.setBytes(8, children); + updateProc.setString(9, f.getFrameId()); + return updateProc; + } + }); + } + } catch (DataAccessException dae) { + logger.info("The proc for frame " + f + " could not be updated with new memory stats: " + + dae); + } + } /** - * Not keeping track of local cores this way. + * Maps a row to a VirtualProc object. 
*/ + public static final RowMapper VIRTUAL_PROC_MAPPER = new RowMapper() { + public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { + VirtualProc proc = new VirtualProc(); + proc.id = rs.getString("pk_proc"); + proc.hostId = rs.getString("pk_host"); + proc.showId = rs.getString("pk_show"); + proc.jobId = rs.getString("pk_job"); + proc.layerId = rs.getString("pk_layer"); + proc.frameId = rs.getString("pk_frame"); + proc.hostName = rs.getString("host_name"); + proc.allocationId = rs.getString("pk_alloc"); + proc.facilityId = rs.getString("pk_facility"); + proc.coresReserved = rs.getInt("int_cores_reserved"); + proc.memoryReserved = rs.getLong("int_mem_reserved"); + proc.memoryMax = rs.getLong("int_mem_max_used"); + proc.gpusReserved = rs.getInt("int_gpus_reserved"); + proc.gpuMemoryReserved = rs.getLong("int_gpu_mem_reserved"); + proc.gpuMemoryMax = rs.getLong("int_gpu_mem_max_used"); + proc.virtualMemoryMax = rs.getLong("int_virt_max_used"); + proc.virtualMemoryUsed = rs.getLong("int_virt_used"); + proc.memoryUsed = rs.getLong("int_mem_used"); + proc.unbooked = rs.getBoolean("b_unbooked"); + proc.isLocalDispatch = rs.getBoolean("b_local"); + proc.os = rs.getString("str_os"); + proc.childProcesses = rs.getBytes("bytea_children"); + return proc; + } + }; + + private static final String GET_VIRTUAL_PROC = "SELECT " + "proc.pk_proc," + "proc.pk_host," + + "proc.pk_show," + "proc.pk_job," + "proc.pk_layer," + "proc.pk_frame," + + "proc.b_unbooked," + "proc.b_local," + "host.pk_alloc, " + "alloc.pk_facility," + + "proc.int_cores_reserved," + "proc.int_mem_reserved," + "proc.int_mem_max_used," + + "proc.int_mem_used," + "proc.int_gpus_reserved," + "proc.int_gpu_mem_reserved," + + "proc.int_gpu_mem_max_used," + "proc.int_gpu_mem_used," + "proc.bytea_children," + + "proc.int_virt_max_used," + "proc.int_virt_used," + "host.str_name AS host_name, " + + "COALESCE(job.str_os, '') AS str_os " + "FROM " + "proc, " + "job, " + "host, " + + "host_stat, " + "alloc " + "WHERE " + "proc.pk_host = host.pk_host " + "AND " + + "host.pk_host = host_stat.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " + + "AND " + "job.pk_job = proc.pk_job "; + + public VirtualProc getVirtualProc(String id) { + return getJdbcTemplate().queryForObject(GET_VIRTUAL_PROC + " AND proc.pk_proc=? ", + VIRTUAL_PROC_MAPPER, id); + } + + public VirtualProc findVirtualProc(FrameInterface frame) { + return getJdbcTemplate().queryForObject(GET_VIRTUAL_PROC + " AND proc.pk_frame=? 
", + VIRTUAL_PROC_MAPPER, frame.getFrameId()); + } + + private static final String GET_VIRTUAL_PROC_LIST = "SELECT " + "proc.*, " + + "host.str_name AS host_name, " + "host.pk_alloc, " + + "COALESCE(job.str_os, '') AS str_os, " + "alloc.pk_facility " + "FROM " + "proc, " + + "frame, " + "host," + "host_stat, " + "alloc, " + "layer," + "job, " + "folder, " + + "show " + "WHERE " + "proc.pk_show = show.pk_show " + "AND " + + "proc.pk_host = host.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " + "AND " + + "host.pk_host = host_stat.pk_host " + "AND " + "proc.pk_job = job.pk_job " + "AND " + + "proc.pk_layer = layer.pk_layer " + "AND " + "proc.pk_frame = frame.pk_frame " + + "AND " + "job.pk_folder = folder.pk_folder "; + + public List findVirtualProcs(ProcSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), + VIRTUAL_PROC_MAPPER, r.getValuesArray()); + } - if (!proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + "subscription " + "SET " + "int_cores = int_cores + ?," - + "int_gpus = int_gpus + ? " + "WHERE " + "pk_show = ? " + "AND " + "pk_alloc = ?", - proc.coresReserved, proc.gpusReserved, proc.getShowId(), proc.getAllocationId()); + @Override + public List findBookedVirtualProcs(ProcSearchInterface r) { + return getJdbcTemplate().query( + r.getFilteredQuery(GET_VIRTUAL_PROC_LIST + "AND proc.b_unbooked = false"), + VIRTUAL_PROC_MAPPER, r.getValuesArray()); } - getJdbcTemplate().update( - "UPDATE " + "layer_resource " + "SET " + "int_cores = int_cores + ?," - + "int_gpus = int_gpus + ? " + "WHERE " + "pk_layer = ?", - proc.coresReserved, proc.gpusReserved, proc.getLayerId()); + public List findVirtualProcs(FrameSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), + VIRTUAL_PROC_MAPPER, r.getValuesArray()); + } - if (!proc.isLocalDispatch) { + public List findVirtualProcs(HostInterface host) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_host=?", + VIRTUAL_PROC_MAPPER, host.getHostId()); + } - getJdbcTemplate().update( - "UPDATE " + "job_resource " + "SET " + "int_cores = int_cores + ?," - + "int_gpus = int_gpus + ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); + public List findVirtualProcs(LayerInterface layer) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_layer=?", + VIRTUAL_PROC_MAPPER, layer.getLayerId()); + } - getJdbcTemplate().update( - "UPDATE " + "folder_resource " + "SET " + "int_cores = int_cores + ?," - + "int_gpus = int_gpus + ? " + "WHERE " + "pk_folder = " - + "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); + public List findVirtualProcs(JobInterface job) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_job=?", + VIRTUAL_PROC_MAPPER, job.getJobId()); + } - getJdbcTemplate().update( - "UPDATE " + "point " + "SET " + "int_cores = int_cores + ?," + "int_gpus = int_gpus + ? " - + "WHERE " + "pk_dept = " + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " - + "pk_show = " + "(SELECT pk_show FROM job WHERE pk_job=?) ", - proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); + private static final String FIND_VIRTUAL_PROCS_LJA = GET_VIRTUAL_PROC_LIST + + "AND proc.pk_job=( " + "SELECT pk_job FROM host_local WHERE pk_host_local = ?) " + + "AND proc.pk_host=(" + "SELECT pk_host FROM host_local WHERE pk_host_local = ?) 
"; + + @Override + public List findVirtualProcs(LocalHostAssignment l) { + return getJdbcTemplate().query(FIND_VIRTUAL_PROCS_LJA, VIRTUAL_PROC_MAPPER, l.getId(), + l.getId()); } - if (proc.isLocalDispatch) { + public List findVirtualProcs(HardwareState state) { + return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND host_stat.str_state=?", + VIRTUAL_PROC_MAPPER, state.toString()); + } - getJdbcTemplate().update( - "UPDATE " + "job_resource " + "SET " + "int_local_cores = int_local_cores + ?," - + "int_local_gpus = int_local_gpus + ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.gpusReserved, proc.getJobId()); + public void unbookVirtualProcs(List procs) { + List batchArgs = new ArrayList(procs.size()); + for (VirtualProc proc : procs) { + batchArgs.add(new Object[] {proc.id}); + } + + getJdbcTemplate().batchUpdate("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", batchArgs); + } + + @Override + public boolean setUnbookState(ProcInterface proc, boolean unbooked) { + return getJdbcTemplate().update("UPDATE proc SET b_unbooked=? WHERE pk_proc=?", unbooked, + proc.getProcId()) == 1; + } + + @Override + public boolean setRedirectTarget(ProcInterface p, Redirect r) { + String name = null; + boolean unbooked = false; + if (r != null) { + name = r.getDestinationName(); + unbooked = true; + } + return getJdbcTemplate().update( + "UPDATE proc SET str_redirect=?, b_unbooked=? WHERE pk_proc=?", name, unbooked, + p.getProcId()) == 1; + } + + public void unbookProc(ProcInterface proc) { + getJdbcTemplate().update("UPDATE proc SET b_unbooked=true WHERE pk_proc=?", + proc.getProcId()); + } + + public String getCurrentShowId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_show FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } + + public String getCurrentJobId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_job FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } - getJdbcTemplate().update( - "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle - ?, " - + "int_mem_idle = int_mem_idle - ?," + "int_gpus_idle = int_gpus_idle - ?, " - + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_job = ? 
" + "AND " - + "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, - proc.getJobId(), proc.getHostId()); + public String getCurrentLayerId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_layer FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } + + public String getCurrentFrameId(ProcInterface p) { + return getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_proc=?", + String.class, p.getProcId()); + } + + private static final String ORPHANED_PROC_INTERVAL = "interval '300' second"; + private static final String GET_ORPHANED_PROC_LIST = "SELECT " + "proc.*, " + + "host.str_name AS host_name, " + "COALESCE(job.str_os, '') AS str_os, " + + "host.pk_alloc, " + "alloc.pk_facility " + "FROM " + "proc, " + "host, " + + "host_stat," + "alloc, " + "job " + "WHERE " + "proc.pk_host = host.pk_host " + "AND " + + "host.pk_host = host_stat.pk_host " + "AND " + "host.pk_alloc = alloc.pk_alloc " + + "AND " + "job.pk_job = proc.pk_job " + "AND " + "current_timestamp - proc.ts_ping > " + + ORPHANED_PROC_INTERVAL; + + public List findOrphanedVirtualProcs() { + return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST, VIRTUAL_PROC_MAPPER); + } + + public List findOrphanedVirtualProcs(int limit) { + return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST + " LIMIT " + limit, + VIRTUAL_PROC_MAPPER); + } + + private static final String IS_ORPHAN = + "SELECT " + "COUNT(1) " + "FROM " + "proc " + "WHERE " + "proc.pk_proc = ? " + "AND " + + "current_timestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; + + @Override + public boolean isOrphan(ProcInterface proc) { + return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, proc.getProcId()) == 1; + } + + public boolean increaseReservedMemory(ProcInterface p, long value) { + try { + return getJdbcTemplate().update( + "UPDATE proc SET int_mem_reserved=? WHERE pk_proc=? AND int_mem_reserved < ?", + value, p.getProcId(), value) == 1; + } catch (Exception e) { + // check by trigger erify_host_resources + throw new ResourceReservationFailureException( + "failed to increase memory reservation for proc " + p.getProcId() + " to " + + value + ", proc does not have that much memory to spare."); + } + } + + public long getReservedMemory(ProcInterface proc) { + return getJdbcTemplate().queryForObject("SELECT int_mem_reserved FROM proc WHERE pk_proc=?", + Long.class, proc.getProcId()); + } + + public long getReservedGpuMemory(ProcInterface proc) { + return getJdbcTemplate().queryForObject( + "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", Long.class, + proc.getProcId()); + } + + private static final String FIND_UNDERUTILIZED_PROCS = "SELECT " + "proc.pk_proc," + + "proc.int_mem_reserved - layer_mem.int_max_rss AS free_mem " + "FROM " + "proc," + + "host, " + "layer_mem " + "WHERE " + "proc.pk_host = host.pk_host " + "AND " + + "proc.pk_layer = layer_mem.pk_layer " + "AND " + "layer_mem.int_max_rss > 0 " + "AND " + + "host.pk_host = ? " + "AND " + "proc.pk_proc != ? 
" + "AND " + + "proc.int_mem_reserved - layer_mem.int_max_rss > 0"; + + public boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem) { + + List> result = getJdbcTemplate().queryForList(FIND_UNDERUTILIZED_PROCS, + targetProc.getHostId(), targetProc.getProcId()); + + if (result.size() == 0) { + logger.info("unable to find under utilized procs on host " + targetProc.getName()); + return false; + } + + final Map borrowMap = new HashMap(result.size()); + for (Map map : result) { + logger.info("creating borrow map for: " + (String) map.get("pk_proc")); + borrowMap.put((String) map.get("pk_proc"), 0l); + } + + long memBorrowedTotal = 0l; + int pass = 0; + int maxPasses = 3; + + while (true) { + // the amount of memory we're going to borrow per frame/proc + long memPerFrame = ((targetMem - memBorrowedTotal) / result.size()) + 1; + + // loop through all of our other running frames and try to borrow + // a little bit of memory from each one. + for (Map map : result) { + String pk_proc = (String) map.get("pk_proc"); + Long free_mem = (Long) map.get("free_mem"); + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long available = free_mem - borrowMap.get(pk_proc) - memReservedMin; + if (available > memPerFrame) { + borrowMap.put(pk_proc, borrowMap.get(pk_proc) + memPerFrame); + memBorrowedTotal = memBorrowedTotal + memPerFrame; + } + } + pass++; + + // If we were unable to borrow anything, just break + if (memBorrowedTotal == 0) { + break; + } + // If we got the memory we needed, break + if (memBorrowedTotal >= targetMem) { + break; + } + // If we've exceeded the number of tries in this loop, break + if (pass >= maxPasses) { + break; + } + } + + logger.info("attempted to borrow " + targetMem + " for host " + targetProc.getName() + + ", obtained " + memBorrowedTotal); + + if (memBorrowedTotal < targetMem) { + logger.info("mem borrowed " + memBorrowedTotal + " was less than the target memory of " + + targetMem); + return false; + } + + /* + * This might fail... I'm not really sure if we should fail the whole operation or what. + * Just gonna let it ride for now. + */ + for (Map.Entry set : borrowMap.entrySet()) { + int success = getJdbcTemplate().update( + "UPDATE proc SET int_mem_reserved = int_mem_reserved - ? WHERE pk_proc=?", + set.getValue(), set.getKey()); + logger.info("transfering " + (set.getValue() * success) + " from " + set.getKey()); + } + + return true; + } + + public void updateReservedMemory(ProcInterface p, long value) { + getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=?", value, + p.getProcId()); + } + + /** + * Updates proc counts for the host, subscription, layer, job, folder, and proc point when a + * proc is destroyed. + * + * @param proc + */ + private void procDestroyed(VirtualProc proc) { + + getJdbcTemplate().update( + "UPDATE " + "host " + "SET " + "int_cores_idle = int_cores_idle + ?," + + "int_mem_idle = int_mem_idle + ?, " + "int_gpus_idle = int_gpus_idle + ?," + + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_host = ?", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.getHostId()); + + if (!proc.isLocalDispatch) { + getJdbcTemplate().update( + "UPDATE " + "subscription " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_show = ? 
" + "AND " + + "pk_alloc = ?", + proc.coresReserved, proc.gpusReserved, proc.getShowId(), + proc.getAllocationId()); + } + + getJdbcTemplate().update( + "UPDATE " + "layer_resource " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_layer = ?", + proc.coresReserved, proc.gpusReserved, proc.getLayerId()); + + if (!proc.isLocalDispatch) { + + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "folder_resource " + "SET " + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_folder = " + + "(SELECT pk_folder FROM job WHERE pk_job=?)", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "point " + "SET " + "int_cores = int_cores - ?, " + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_dept = " + + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " + "pk_show = " + + "(SELECT pk_show FROM job WHERE pk_job=?) ", + proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); + } + + if (proc.isLocalDispatch) { + + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_local_cores = int_local_cores - ?, " + + "int_local_gpus = int_local_gpus - ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle + ?, " + + "int_mem_idle = int_mem_idle + ?, " + + "int_gpus_idle = int_gpus_idle + ?, " + + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_job = ? " + + "AND " + "pk_host = ? ", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved, proc.getJobId(), proc.getHostId()); + } + } + + /** + * Updates proc counts for the host, subscription, layer, job, folder, and proc point when a new + * proc is created. + * + * @param proc + */ + private void procCreated(VirtualProc proc) { + + getJdbcTemplate().update( + "UPDATE " + "host " + "SET " + "int_cores_idle = int_cores_idle - ?," + + "int_mem_idle = int_mem_idle - ?, " + "int_gpus_idle = int_gpus_idle - ?," + + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_host = ?", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, + proc.getHostId()); + + /** + * Not keeping track of local cores this way. + */ + + if (!proc.isLocalDispatch) { + getJdbcTemplate().update( + "UPDATE " + "subscription " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_show = ? " + "AND " + + "pk_alloc = ?", + proc.coresReserved, proc.gpusReserved, proc.getShowId(), + proc.getAllocationId()); + } + + getJdbcTemplate().update( + "UPDATE " + "layer_resource " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_layer = ?", + proc.coresReserved, proc.gpusReserved, proc.getLayerId()); + + if (!proc.isLocalDispatch) { + + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "folder_resource " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? 
" + "WHERE " + "pk_folder = " + + "(SELECT pk_folder FROM job WHERE pk_job=?)", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "point " + "SET " + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_dept = " + + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " + "pk_show = " + + "(SELECT pk_show FROM job WHERE pk_job=?) ", + proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); + } + + if (proc.isLocalDispatch) { + + getJdbcTemplate().update( + "UPDATE " + "job_resource " + "SET " + "int_local_cores = int_local_cores + ?," + + "int_local_gpus = int_local_gpus + ? " + "WHERE " + "pk_job = ?", + proc.coresReserved, proc.gpusReserved, proc.getJobId()); + + getJdbcTemplate().update( + "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle - ?, " + + "int_mem_idle = int_mem_idle - ?," + + "int_gpus_idle = int_gpus_idle - ?, " + + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_job = ? " + + "AND " + "pk_host = ?", + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved, proc.getJobId(), proc.getHostId()); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java index fe89fe91e..ce1b127c1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/RedirectDaoJdbc.java @@ -27,60 +27,59 @@ import com.imageworks.spcue.grpc.host.RedirectType; public class RedirectDaoJdbc extends JdbcDaoSupport implements RedirectDao { - @Override - public boolean containsKey(String key) { - return getJdbcTemplate().queryForObject("SELECT count(1) FROM redirect WHERE pk_proc = ?", - Integer.class, key) > 0; - } - - @Override - public int countRedirectsWithGroup(String groupId) { - return getJdbcTemplate().queryForObject("SELECT count(1) FROM redirect WHERE str_group_id = ?", - Integer.class, groupId); - } + @Override + public boolean containsKey(String key) { + return getJdbcTemplate().queryForObject("SELECT count(1) FROM redirect WHERE pk_proc = ?", + Integer.class, key) > 0; + } - @Override - public int deleteExpired() { - long cutoff = System.currentTimeMillis() - Redirect.EXPIRE_TIME; - return getJdbcTemplate().update("DELETE FROM redirect WHERE lng_creation_time < ?", cutoff); - } + @Override + public int countRedirectsWithGroup(String groupId) { + return getJdbcTemplate().queryForObject( + "SELECT count(1) FROM redirect WHERE str_group_id = ?", Integer.class, groupId); + } - @Override - public void put(String key, Redirect r) { - getJdbcTemplate().update( - "INSERT INTO redirect (" + "pk_proc, " + "str_group_id, " + "int_type, " - + "str_destination_id, " + "str_name, " + "lng_creation_time" - + ") VALUES (?, ?, ?, ?, ?, ?) 
" + "ON CONFLICT (pk_proc) " + "DO UPDATE SET " - + "str_group_id = EXCLUDED.str_group_id, " + "int_type = EXCLUDED.int_type, " - + "str_destination_id = EXCLUDED.str_destination_id, " - + "str_name = EXCLUDED.str_name, " + "lng_creation_time = EXCLUDED.lng_creation_time", - key, r.getGroupId(), r.getType().getNumber(), r.getDestinationId(), r.getDestinationName(), - r.getCreationTime()); - } + @Override + public int deleteExpired() { + long cutoff = System.currentTimeMillis() - Redirect.EXPIRE_TIME; + return getJdbcTemplate().update("DELETE FROM redirect WHERE lng_creation_time < ?", cutoff); + } - @Override - public Redirect remove(String key) { - Redirect r = null; - try { - r = getJdbcTemplate() - .queryForObject( - "SELECT str_group_id, int_type, str_destination_id, str_name, lng_creation_time " - + "FROM redirect " + "WHERE pk_proc = ? " + "FOR UPDATE", - new RowMapper() { - @Override - public Redirect mapRow(ResultSet rs, int rowNum) throws SQLException { - return new Redirect(rs.getString("str_group_id"), - RedirectType.forNumber(rs.getInt("int_type")), - rs.getString("str_destination_id"), rs.getString("str_name"), - rs.getLong("lng_creation_time")); - } - }, key); - } catch (EmptyResultDataAccessException e) { - return null; + @Override + public void put(String key, Redirect r) { + getJdbcTemplate().update("INSERT INTO redirect (" + "pk_proc, " + "str_group_id, " + + "int_type, " + "str_destination_id, " + "str_name, " + "lng_creation_time" + + ") VALUES (?, ?, ?, ?, ?, ?) " + "ON CONFLICT (pk_proc) " + "DO UPDATE SET " + + "str_group_id = EXCLUDED.str_group_id, " + "int_type = EXCLUDED.int_type, " + + "str_destination_id = EXCLUDED.str_destination_id, " + + "str_name = EXCLUDED.str_name, " + + "lng_creation_time = EXCLUDED.lng_creation_time", key, r.getGroupId(), + r.getType().getNumber(), r.getDestinationId(), r.getDestinationName(), + r.getCreationTime()); } - getJdbcTemplate().update("DELETE FROM redirect WHERE pk_proc = ?", key); + @Override + public Redirect remove(String key) { + Redirect r = null; + try { + r = getJdbcTemplate().queryForObject( + "SELECT str_group_id, int_type, str_destination_id, str_name, lng_creation_time " + + "FROM redirect " + "WHERE pk_proc = ? 
" + "FOR UPDATE", + new RowMapper() { + @Override + public Redirect mapRow(ResultSet rs, int rowNum) throws SQLException { + return new Redirect(rs.getString("str_group_id"), + RedirectType.forNumber(rs.getInt("int_type")), + rs.getString("str_destination_id"), rs.getString("str_name"), + rs.getLong("lng_creation_time")); + } + }, key); + } catch (EmptyResultDataAccessException e) { + return null; + } + + getJdbcTemplate().update("DELETE FROM redirect WHERE pk_proc = ?", key); - return r; - } + return r; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java index 91362334e..0d963dee3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java @@ -31,171 +31,179 @@ public class ServiceDaoJdbc extends JdbcDaoSupport implements ServiceDao { - private static final String SPLITTER = " \\| "; + private static final String SPLITTER = " \\| "; - private static final String JOINER = " | "; + private static final String JOINER = " | "; - public static LinkedHashSet splitTags(String tags) { - LinkedHashSet set = Sets.newLinkedHashSet(); - for (String s : tags.split(SPLITTER)) { - set.add(s.replaceAll(" ", "")); + public static LinkedHashSet splitTags(String tags) { + LinkedHashSet set = Sets.newLinkedHashSet(); + for (String s : tags.split(SPLITTER)) { + set.add(s.replaceAll(" ", "")); + } + return set; } - return set; - } - - public static String joinTags(LinkedHashSet tags) { - return StringUtils.join(tags, JOINER); - } - - public static final RowMapper SERVICE_MAPPER = new RowMapper() { - public ServiceEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ServiceEntity s = new ServiceEntity(); - s.id = rs.getString("pk_service"); - s.name = rs.getString("str_name"); - s.minCores = rs.getInt("int_cores_min"); - s.maxCores = rs.getInt("int_cores_max"); - s.minMemory = rs.getLong("int_mem_min"); - s.minGpus = rs.getInt("int_gpus_min"); - s.maxGpus = rs.getInt("int_gpus_max"); - s.minGpuMemory = rs.getLong("int_gpu_mem_min"); - s.threadable = rs.getBoolean("b_threadable"); - s.tags = splitTags(rs.getString("str_tags")); - s.timeout = rs.getInt("int_timeout"); - s.timeout_llu = rs.getInt("int_timeout_llu"); - s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); - return s; + + public static String joinTags(LinkedHashSet tags) { + return StringUtils.join(tags, JOINER); } - }; - - public static final RowMapper SERVICE_OVERRIDE_MAPPER = - new RowMapper() { - public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.id = rs.getString("pk_show_service"); - s.name = rs.getString("str_name"); - s.minCores = rs.getInt("int_cores_min"); - s.maxCores = rs.getInt("int_cores_max"); - s.minMemory = rs.getLong("int_mem_min"); - s.minGpus = rs.getInt("int_gpus_min"); - s.maxGpus = rs.getInt("int_gpus_max"); - s.minGpuMemory = rs.getLong("int_gpu_mem_min"); - s.threadable = rs.getBoolean("b_threadable"); - s.tags = splitTags(rs.getString("str_tags")); - s.showId = rs.getString("pk_show"); - s.timeout = rs.getInt("int_timeout"); - s.timeout_llu = rs.getInt("int_timeout_llu"); - s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); - return s; + + public static final RowMapper SERVICE_MAPPER = new RowMapper() { + public ServiceEntity mapRow(ResultSet rs, int rowNum) throws SQLException { 
+ ServiceEntity s = new ServiceEntity(); + s.id = rs.getString("pk_service"); + s.name = rs.getString("str_name"); + s.minCores = rs.getInt("int_cores_min"); + s.maxCores = rs.getInt("int_cores_max"); + s.minMemory = rs.getLong("int_mem_min"); + s.minGpus = rs.getInt("int_gpus_min"); + s.maxGpus = rs.getInt("int_gpus_max"); + s.minGpuMemory = rs.getLong("int_gpu_mem_min"); + s.threadable = rs.getBoolean("b_threadable"); + s.tags = splitTags(rs.getString("str_tags")); + s.timeout = rs.getInt("int_timeout"); + s.timeout_llu = rs.getInt("int_timeout_llu"); + s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); + return s; } - }; - - private static final String QUERY_FOR_SERVICE = - "SELECT " + "service.pk_service," + "service.str_name," + "service.b_threadable," - + "service.int_cores_min," + "service.int_cores_max," + "service.int_mem_min," - + "service.int_gpus_min," + "service.int_gpus_max," + "service.int_gpu_mem_min," - + "service.str_tags, " + "service.int_timeout, " + "service.int_timeout_llu, " - + "service.int_min_memory_increase " + "FROM " + "service "; - - @Override - public ServiceEntity get(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_SERVICE + " WHERE (pk_service=? OR str_name=?)", SERVICE_MAPPER, id, id); - } - - private static final String QUERY_FOR_SERVICE_OVER = "SELECT " + "show_service.pk_show_service," - + "show_service.str_name," + "show_service.b_threadable," + "show_service.int_cores_min," - + "show_service.int_cores_max, " + "show_service.int_mem_min," + "show_service.int_gpus_min," - + "show_service.int_gpus_max, " + "show_service.int_gpu_mem_min," + "show_service.str_tags," - + "show_service.int_timeout," + "show_service.int_timeout_llu," - + "show_service.int_min_memory_increase," + "show.pk_show " + "FROM " + "show_service," - + "show " + "WHERE " + "show_service.pk_show = show.pk_show "; - - @Override - public ServiceOverrideEntity getOverride(String id, String show) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_SERVICE_OVER + " AND (show_service.pk_show_service=? OR show_service.str_name=?)" - + " AND (show.str_name=? OR show.pk_show=?)", - SERVICE_OVERRIDE_MAPPER, id, id, show, show); - } - - @Override - public ServiceOverrideEntity getOverride(String id) { - return getJdbcTemplate().queryForObject(QUERY_FOR_SERVICE_OVER - + " AND (show_service.pk_show_service=? " + "OR show_service.str_name=?)", - SERVICE_OVERRIDE_MAPPER, id, id); - } - - @Override - public boolean isOverridden(String service, String show) { - return getJdbcTemplate().queryForObject("SELECT COUNT(1) FROM show_service, show WHERE " - + "show_service.pk_show = show.pk_show = ? " - + "AND show_service.str_name=? 
and show.str_name=?", Integer.class, service, show) > 0; - } - - private static final String INSERT_SERVICE = "INSERT INTO " + "service " + "(" + "pk_service," - + "str_name," + "b_threadable," + "int_cores_min," + "int_cores_max, " + "int_mem_min," - + "int_gpus_min," + "int_gpus_max, " + "int_gpu_mem_min," + "str_tags," + "int_timeout," - + "int_timeout_llu, " + "int_min_memory_increase " + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insert(ServiceEntity service) { - service.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SERVICE, service.id, service.name, service.threadable, - service.minCores, service.maxCores, service.minMemory, service.minGpus, service.maxGpus, - service.minGpuMemory, StringUtils.join(service.tags.toArray(), " | "), service.timeout, - service.timeout_llu, service.minMemoryIncrease); - } - - private static final String INSERT_SERVICE_WITH_SHOW = "INSERT INTO " + "show_service " + "(" - + "pk_show_service," + "pk_show, " + "str_name," + "b_threadable," + "int_cores_min," - + "int_cores_max," + "int_mem_min," + "int_gpus_min," + "int_gpus_max," + "int_gpu_mem_min," - + "str_tags," + "int_timeout," + "int_timeout_llu, " + "int_min_memory_increase " - + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insert(ServiceOverrideEntity service) { - service.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SERVICE_WITH_SHOW, service.id, service.showId, service.name, - service.threadable, service.minCores, service.maxCores, service.minMemory, service.minGpus, - service.maxGpus, service.minGpuMemory, joinTags(service.tags), service.timeout, - service.timeout_llu, service.minMemoryIncrease); - } - - private static final String UPDATE_SERVICE = - "UPDATE " + "service " + "SET " + "str_name=?," + "b_threadable=?," + "int_cores_min=?," - + "int_cores_max=?," + "int_mem_min=?," + "int_gpus_min=?," + "int_gpus_max=?," - + "int_gpu_mem_min=?," + "str_tags=?," + "int_timeout=?," + "int_timeout_llu=?, " - + "int_min_memory_increase=? " + "WHERE " + "pk_service = ?"; - - @Override - public void update(ServiceEntity service) { - getJdbcTemplate().update(UPDATE_SERVICE, service.name, service.threadable, service.minCores, - service.maxCores, service.minMemory, service.minGpus, service.maxGpus, service.minGpuMemory, - joinTags(service.tags), service.timeout, service.timeout_llu, service.minMemoryIncrease, - service.getId()); - } - - private static final String UPDATE_SERVICE_WITH_SHOW = - "UPDATE " + "show_service " + "SET " + "str_name=?," + "b_threadable=?," + "int_cores_min=?," - + "int_cores_max=?," + "int_mem_min=?," + "int_gpus_min=?," + "int_gpus_max=?," - + "int_gpu_mem_min=?," + "str_tags=?," + "int_timeout=?," + "int_timeout_llu=?, " - + "int_min_memory_increase=? 
" + "WHERE " + "pk_show_service = ?"; - - @Override - public void update(ServiceOverrideEntity service) { - getJdbcTemplate().update(UPDATE_SERVICE_WITH_SHOW, service.name, service.threadable, - service.minCores, service.maxCores, service.minMemory, service.minGpus, service.maxGpus, - service.minGpuMemory, joinTags(service.tags), service.timeout, service.timeout_llu, - service.minMemoryIncrease, service.getId()); - } - - @Override - public void delete(ServiceEntity service) { - getJdbcTemplate().update("DELETE FROM service WHERE pk_service=?", service.getId()); - } - - @Override - public void delete(ServiceOverrideEntity service) { - getJdbcTemplate().update("DELETE FROM show_service WHERE pk_show_service=?", service.getId()); - } + }; + + public static final RowMapper SERVICE_OVERRIDE_MAPPER = + new RowMapper() { + public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.id = rs.getString("pk_show_service"); + s.name = rs.getString("str_name"); + s.minCores = rs.getInt("int_cores_min"); + s.maxCores = rs.getInt("int_cores_max"); + s.minMemory = rs.getLong("int_mem_min"); + s.minGpus = rs.getInt("int_gpus_min"); + s.maxGpus = rs.getInt("int_gpus_max"); + s.minGpuMemory = rs.getLong("int_gpu_mem_min"); + s.threadable = rs.getBoolean("b_threadable"); + s.tags = splitTags(rs.getString("str_tags")); + s.showId = rs.getString("pk_show"); + s.timeout = rs.getInt("int_timeout"); + s.timeout_llu = rs.getInt("int_timeout_llu"); + s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); + return s; + } + }; + + private static final String QUERY_FOR_SERVICE = + "SELECT " + "service.pk_service," + "service.str_name," + "service.b_threadable," + + "service.int_cores_min," + "service.int_cores_max," + "service.int_mem_min," + + "service.int_gpus_min," + "service.int_gpus_max," + "service.int_gpu_mem_min," + + "service.str_tags, " + "service.int_timeout, " + "service.int_timeout_llu, " + + "service.int_min_memory_increase " + "FROM " + "service "; + + @Override + public ServiceEntity get(String id) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_SERVICE + " WHERE (pk_service=? OR str_name=?)", SERVICE_MAPPER, id, id); + } + + private static final String QUERY_FOR_SERVICE_OVER = + "SELECT " + "show_service.pk_show_service," + "show_service.str_name," + + "show_service.b_threadable," + "show_service.int_cores_min," + + "show_service.int_cores_max, " + "show_service.int_mem_min," + + "show_service.int_gpus_min," + "show_service.int_gpus_max, " + + "show_service.int_gpu_mem_min," + "show_service.str_tags," + + "show_service.int_timeout," + "show_service.int_timeout_llu," + + "show_service.int_min_memory_increase," + "show.pk_show " + "FROM " + + "show_service," + "show " + "WHERE " + "show_service.pk_show = show.pk_show "; + + @Override + public ServiceOverrideEntity getOverride(String id, String show) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_SERVICE_OVER + + " AND (show_service.pk_show_service=? OR show_service.str_name=?)" + + " AND (show.str_name=? OR show.pk_show=?)", + SERVICE_OVERRIDE_MAPPER, id, id, show, show); + } + + @Override + public ServiceOverrideEntity getOverride(String id) { + return getJdbcTemplate().queryForObject(QUERY_FOR_SERVICE_OVER + + " AND (show_service.pk_show_service=? 
" + "OR show_service.str_name=?)", + SERVICE_OVERRIDE_MAPPER, id, id); + } + + @Override + public boolean isOverridden(String service, String show) { + return getJdbcTemplate().queryForObject( + "SELECT COUNT(1) FROM show_service, show WHERE " + + "show_service.pk_show = show.pk_show = ? " + + "AND show_service.str_name=? and show.str_name=?", + Integer.class, service, show) > 0; + } + + private static final String INSERT_SERVICE = "INSERT INTO " + "service " + "(" + "pk_service," + + "str_name," + "b_threadable," + "int_cores_min," + "int_cores_max, " + "int_mem_min," + + "int_gpus_min," + "int_gpus_max, " + "int_gpu_mem_min," + "str_tags," + "int_timeout," + + "int_timeout_llu, " + "int_min_memory_increase " + + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insert(ServiceEntity service) { + service.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SERVICE, service.id, service.name, service.threadable, + service.minCores, service.maxCores, service.minMemory, service.minGpus, + service.maxGpus, service.minGpuMemory, + StringUtils.join(service.tags.toArray(), " | "), service.timeout, + service.timeout_llu, service.minMemoryIncrease); + } + + private static final String INSERT_SERVICE_WITH_SHOW = "INSERT INTO " + "show_service " + "(" + + "pk_show_service," + "pk_show, " + "str_name," + "b_threadable," + "int_cores_min," + + "int_cores_max," + "int_mem_min," + "int_gpus_min," + "int_gpus_max," + + "int_gpu_mem_min," + "str_tags," + "int_timeout," + "int_timeout_llu, " + + "int_min_memory_increase " + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + + @Override + public void insert(ServiceOverrideEntity service) { + service.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SERVICE_WITH_SHOW, service.id, service.showId, service.name, + service.threadable, service.minCores, service.maxCores, service.minMemory, + service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), + service.timeout, service.timeout_llu, service.minMemoryIncrease); + } + + private static final String UPDATE_SERVICE = "UPDATE " + "service " + "SET " + "str_name=?," + + "b_threadable=?," + "int_cores_min=?," + "int_cores_max=?," + "int_mem_min=?," + + "int_gpus_min=?," + "int_gpus_max=?," + "int_gpu_mem_min=?," + "str_tags=?," + + "int_timeout=?," + "int_timeout_llu=?, " + "int_min_memory_increase=? " + "WHERE " + + "pk_service = ?"; + + @Override + public void update(ServiceEntity service) { + getJdbcTemplate().update(UPDATE_SERVICE, service.name, service.threadable, service.minCores, + service.maxCores, service.minMemory, service.minGpus, service.maxGpus, + service.minGpuMemory, joinTags(service.tags), service.timeout, service.timeout_llu, + service.minMemoryIncrease, service.getId()); + } + + private static final String UPDATE_SERVICE_WITH_SHOW = "UPDATE " + "show_service " + "SET " + + "str_name=?," + "b_threadable=?," + "int_cores_min=?," + "int_cores_max=?," + + "int_mem_min=?," + "int_gpus_min=?," + "int_gpus_max=?," + "int_gpu_mem_min=?," + + "str_tags=?," + "int_timeout=?," + "int_timeout_llu=?, " + + "int_min_memory_increase=? 
" + "WHERE " + "pk_show_service = ?"; + + @Override + public void update(ServiceOverrideEntity service) { + getJdbcTemplate().update(UPDATE_SERVICE_WITH_SHOW, service.name, service.threadable, + service.minCores, service.maxCores, service.minMemory, service.minGpus, + service.maxGpus, service.minGpuMemory, joinTags(service.tags), service.timeout, + service.timeout_llu, service.minMemoryIncrease, service.getId()); + } + + @Override + public void delete(ServiceEntity service) { + getJdbcTemplate().update("DELETE FROM service WHERE pk_service=?", service.getId()); + } + + @Override + public void delete(ServiceOverrideEntity service) { + getJdbcTemplate().update("DELETE FROM show_service WHERE pk_show_service=?", + service.getId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java index e8578752b..3a5c3145c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java @@ -36,174 +36,176 @@ import com.imageworks.spcue.util.SqlUtil; public class ShowDaoJdbc extends JdbcDaoSupport implements ShowDao { - @Autowired - private Environment env; - - private static final RowMapper SHOW_MAPPER = new RowMapper() { - public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ShowEntity show = new ShowEntity(); - show.name = rs.getString("str_name"); - show.id = rs.getString("pk_show"); - show.defaultMaxCores = rs.getInt("int_default_max_cores"); - show.defaultMinCores = rs.getInt("int_default_min_cores"); - show.defaultMaxGpus = rs.getInt("int_default_max_gpus"); - show.defaultMinGpus = rs.getInt("int_default_min_gpus"); - show.active = rs.getBoolean("b_active"); - - if (rs.getString("str_comment_email") != null) { - show.commentMail = rs.getString("str_comment_email").split(","); - } else { - show.commentMail = new String[0]; - } - return show; - } - }; - - private static final String GET_SHOW = - "SELECT " + "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " - + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show.str_name, " - + "show.b_active, " + "show.str_comment_email " + "FROM " + "show "; - - private static final String GET_SHOW_BY_ALIAS = - "SELECT " + "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " - + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show_alias.str_name, " - + "show.b_active, " + "show.str_comment_email " + "FROM " + "show, " + "show_alias " - + "WHERE " + "show.pk_show = show_alias.pk_show "; - - public ShowEntity findShowDetail(String name) { - try { - return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.str_name=?", SHOW_MAPPER, - name); - } catch (EmptyResultDataAccessException e) { - return getJdbcTemplate().queryForObject(GET_SHOW_BY_ALIAS + "AND show_alias.str_name = ?", - SHOW_MAPPER, name); - } - } - - public ShowEntity getShowDetail(String id) { - return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.pk_show=?", SHOW_MAPPER, id); - } - - private static final String GET_PREFERRED_SHOW = - "SELECT " + "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " - + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show.str_name, " - + "show.b_active, " + "show.str_comment_email " + "FROM " + "show, " + "owner," + "deed " - + "WHERE " + "show.pk_show = owner.pk_show " + "AND " + 
"deed.pk_owner = owner.pk_owner " - + "AND " + "deed.pk_host = ?"; - - public ShowEntity getShowDetail(HostInterface host) { - return getJdbcTemplate().queryForObject(GET_PREFERRED_SHOW, SHOW_MAPPER, host.getHostId()); - } - - private static final String INSERT_SHOW = "INSERT INTO show (pk_show,str_name) VALUES (?,?)"; - - private static final String INSERT_SHOW_STATS = "INSERT INTO show_stats " - + "(pk_show, int_frame_insert_count, int_job_insert_count, int_frame_success_count, int_frame_fail_count) " - + "VALUES (?, 0, 0, 0, 0)"; - - public void insertShow(ShowEntity show) { - show.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SHOW, show.id, show.name); - getJdbcTemplate().update(INSERT_SHOW_STATS, show.id); - } - - private static final String SHOW_EXISTS = "SELECT " + "COUNT(show.pk_show) " + "FROM " - + "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show) " + "WHERE " - + "(show.str_name = ? OR show_alias.str_name = ?) "; - - public boolean showExists(String name) { - try { - return getJdbcTemplate().queryForObject(SHOW_EXISTS, Integer.class, name, name) >= 1; - } catch (DataAccessException e) { - return false; - } - } - - @Override - public void delete(ShowInterface s) { - getJdbcTemplate().update("DELETE FROM point WHERE pk_show=?", s.getShowId()); - getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", s.getShowId()); - getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", s.getShowId()); - getJdbcTemplate().update("DELETE FROM show_alias WHERE pk_show=?", s.getShowId()); - getJdbcTemplate().update("DELETE FROM show_stats WHERE pk_show=?", s.getShowId()); - getJdbcTemplate().update("DELETE FROM show WHERE pk_show=?", s.getShowId()); - } - - public void updateShowDefaultMinCores(ShowInterface s, int val) { - if (val < 0) { - String msg = "Invalid argument, default min cores " + val + "must be greater tham 0"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update("UPDATE show SET int_default_min_cores=? WHERE pk_show=?", val, - s.getShowId()); - } - - public void updateShowDefaultMaxCores(ShowInterface s, int val) { - if (val < 0) { - String msg = "Invalid argument, default max cores " + val + "must be greater tham 0"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update("UPDATE show SET int_default_max_cores=? WHERE pk_show=?", val, - s.getShowId()); - } - - public void updateShowDefaultMinGpus(ShowInterface s, int val) { - getJdbcTemplate().update("UPDATE show SET int_default_min_gpus=? WHERE pk_show=?", val, - s.getShowId()); - } - - public void updateShowDefaultMaxGpus(ShowInterface s, int val) { - getJdbcTemplate().update("UPDATE show SET int_default_max_gpus=? WHERE pk_show=?", val, - s.getShowId()); - } - - @Override - public void updateBookingEnabled(ShowInterface s, boolean enabled) { - getJdbcTemplate().update("UPDATE show SET b_booking_enabled = ? WHERE pk_show=?", enabled, - s.getShowId()); - } - - @Override - public void updateDispatchingEnabled(ShowInterface s, boolean enabled) { - getJdbcTemplate().update("UPDATE show SET b_dispatch_enabled = ? WHERE pk_show=?", enabled, - s.getShowId()); - } - - @Override - public void updateActive(ShowInterface s, boolean enabled) { - getJdbcTemplate().update("UPDATE show SET b_active= ? WHERE pk_show=?", enabled, s.getShowId()); - } - - @Override - public void updateShowCommentEmail(ShowInterface s, String[] email) { - getJdbcTemplate().update("UPDATE show SET str_comment_email = ? 
WHERE pk_show=?", - StringUtils.join(email, ","), s.getShowId()); - } - - @Override - public void updateShowsStatus() { - Stream protectedShowsRaw = - Arrays.stream(env.getProperty("protected_shows", String.class, "").split(",")); - String protectedShows = - protectedShowsRaw.map(show -> "'" + show + "'").collect(Collectors.joining(",")); - int maxShowStaleDays = env.getProperty("max_show_stale_days", Integer.class, -1); - - if (maxShowStaleDays > 0) { - getJdbcTemplate().update("UPDATE show SET b_active=false " - + "WHERE pk_show NOT IN (SELECT pk_show " - + " FROM (SELECT pk_show, count(pk_job) FROM job_history " + " WHERE " - + " (DATE_PART('days', NOW()) - DATE_PART('days', dt_last_modified)) < ? " - + "GROUP BY pk_show HAVING COUNT(pk_job) > 0) pk_show) " + " AND str_name NOT IN (?)", - maxShowStaleDays, protectedShows); - } - } - - @Override - public void updateFrameCounters(ShowInterface s, int exitStatus) { - String col = "int_frame_success_count = int_frame_success_count + 1"; - if (exitStatus > 0) { - col = "int_frame_fail_count = int_frame_fail_count + 1"; - } - getJdbcTemplate().update("UPDATE show_stats SET " + col + " WHERE pk_show=?", s.getShowId()); - } + @Autowired + private Environment env; + + private static final RowMapper SHOW_MAPPER = new RowMapper() { + public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + ShowEntity show = new ShowEntity(); + show.name = rs.getString("str_name"); + show.id = rs.getString("pk_show"); + show.defaultMaxCores = rs.getInt("int_default_max_cores"); + show.defaultMinCores = rs.getInt("int_default_min_cores"); + show.defaultMaxGpus = rs.getInt("int_default_max_gpus"); + show.defaultMinGpus = rs.getInt("int_default_min_gpus"); + show.active = rs.getBoolean("b_active"); + + if (rs.getString("str_comment_email") != null) { + show.commentMail = rs.getString("str_comment_email").split(","); + } else { + show.commentMail = new String[0]; + } + return show; + } + }; + + private static final String GET_SHOW = "SELECT " + "show.pk_show, " + + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show.str_name, " + + "show.b_active, " + "show.str_comment_email " + "FROM " + "show "; + + private static final String GET_SHOW_BY_ALIAS = "SELECT " + "show.pk_show, " + + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + + "show_alias.str_name, " + "show.b_active, " + "show.str_comment_email " + "FROM " + + "show, " + "show_alias " + "WHERE " + "show.pk_show = show_alias.pk_show "; + + public ShowEntity findShowDetail(String name) { + try { + return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.str_name=?", SHOW_MAPPER, + name); + } catch (EmptyResultDataAccessException e) { + return getJdbcTemplate().queryForObject( + GET_SHOW_BY_ALIAS + "AND show_alias.str_name = ?", SHOW_MAPPER, name); + } + } + + public ShowEntity getShowDetail(String id) { + return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.pk_show=?", SHOW_MAPPER, id); + } + + private static final String GET_PREFERRED_SHOW = "SELECT " + "show.pk_show, " + + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + "show.int_default_min_gpus, " + "show.str_name, " + + "show.b_active, " + "show.str_comment_email " + "FROM " + "show, " + "owner," + + "deed " + "WHERE " + "show.pk_show = owner.pk_show " + "AND " + + "deed.pk_owner = owner.pk_owner " + "AND " + 
"deed.pk_host = ?"; + + public ShowEntity getShowDetail(HostInterface host) { + return getJdbcTemplate().queryForObject(GET_PREFERRED_SHOW, SHOW_MAPPER, host.getHostId()); + } + + private static final String INSERT_SHOW = "INSERT INTO show (pk_show,str_name) VALUES (?,?)"; + + private static final String INSERT_SHOW_STATS = "INSERT INTO show_stats " + + "(pk_show, int_frame_insert_count, int_job_insert_count, int_frame_success_count, int_frame_fail_count) " + + "VALUES (?, 0, 0, 0, 0)"; + + public void insertShow(ShowEntity show) { + show.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SHOW, show.id, show.name); + getJdbcTemplate().update(INSERT_SHOW_STATS, show.id); + } + + private static final String SHOW_EXISTS = "SELECT " + "COUNT(show.pk_show) " + "FROM " + + "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show) " + "WHERE " + + "(show.str_name = ? OR show_alias.str_name = ?) "; + + public boolean showExists(String name) { + try { + return getJdbcTemplate().queryForObject(SHOW_EXISTS, Integer.class, name, name) >= 1; + } catch (DataAccessException e) { + return false; + } + } + + @Override + public void delete(ShowInterface s) { + getJdbcTemplate().update("DELETE FROM point WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show_alias WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show_stats WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show WHERE pk_show=?", s.getShowId()); + } + + public void updateShowDefaultMinCores(ShowInterface s, int val) { + if (val < 0) { + String msg = "Invalid argument, default min cores " + val + "must be greater tham 0"; + throw new IllegalArgumentException(msg); + } + getJdbcTemplate().update("UPDATE show SET int_default_min_cores=? WHERE pk_show=?", val, + s.getShowId()); + } + + public void updateShowDefaultMaxCores(ShowInterface s, int val) { + if (val < 0) { + String msg = "Invalid argument, default max cores " + val + "must be greater tham 0"; + throw new IllegalArgumentException(msg); + } + getJdbcTemplate().update("UPDATE show SET int_default_max_cores=? WHERE pk_show=?", val, + s.getShowId()); + } + + public void updateShowDefaultMinGpus(ShowInterface s, int val) { + getJdbcTemplate().update("UPDATE show SET int_default_min_gpus=? WHERE pk_show=?", val, + s.getShowId()); + } + + public void updateShowDefaultMaxGpus(ShowInterface s, int val) { + getJdbcTemplate().update("UPDATE show SET int_default_max_gpus=? WHERE pk_show=?", val, + s.getShowId()); + } + + @Override + public void updateBookingEnabled(ShowInterface s, boolean enabled) { + getJdbcTemplate().update("UPDATE show SET b_booking_enabled = ? WHERE pk_show=?", enabled, + s.getShowId()); + } + + @Override + public void updateDispatchingEnabled(ShowInterface s, boolean enabled) { + getJdbcTemplate().update("UPDATE show SET b_dispatch_enabled = ? WHERE pk_show=?", enabled, + s.getShowId()); + } + + @Override + public void updateActive(ShowInterface s, boolean enabled) { + getJdbcTemplate().update("UPDATE show SET b_active= ? WHERE pk_show=?", enabled, + s.getShowId()); + } + + @Override + public void updateShowCommentEmail(ShowInterface s, String[] email) { + getJdbcTemplate().update("UPDATE show SET str_comment_email = ? 
WHERE pk_show=?", + StringUtils.join(email, ","), s.getShowId()); + } + + @Override + public void updateShowsStatus() { + Stream protectedShowsRaw = + Arrays.stream(env.getProperty("protected_shows", String.class, "").split(",")); + String protectedShows = + protectedShowsRaw.map(show -> "'" + show + "'").collect(Collectors.joining(",")); + int maxShowStaleDays = env.getProperty("max_show_stale_days", Integer.class, -1); + + if (maxShowStaleDays > 0) { + getJdbcTemplate().update("UPDATE show SET b_active=false " + + "WHERE pk_show NOT IN (SELECT pk_show " + + " FROM (SELECT pk_show, count(pk_job) FROM job_history " + " WHERE " + + " (DATE_PART('days', NOW()) - DATE_PART('days', dt_last_modified)) < ? " + + "GROUP BY pk_show HAVING COUNT(pk_job) > 0) pk_show) " + + " AND str_name NOT IN (?)", maxShowStaleDays, protectedShows); + } + } + + @Override + public void updateFrameCounters(ShowInterface s, int exitStatus) { + String col = "int_frame_success_count = int_frame_success_count + 1"; + if (exitStatus > 0) { + col = "int_frame_fail_count = int_frame_fail_count + 1"; + } + getJdbcTemplate().update("UPDATE show_stats SET " + col + " WHERE pk_show=?", + s.getShowId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java index 08318f8c7..b8cacab67 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/SubscriptionDaoJdbc.java @@ -34,129 +34,132 @@ public class SubscriptionDaoJdbc extends JdbcDaoSupport implements SubscriptionDao { - private static final String IS_SHOW_OVER_SIZE = - "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " - + "s.pk_alloc = ? " + "AND " + "s.int_cores > s.int_size "; - - public boolean isShowOverSize(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, Integer.class, show.getShowId(), - alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; + private static final String IS_SHOW_OVER_SIZE = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + + "AND " + "s.pk_alloc = ? " + "AND " + "s.int_cores > s.int_size "; + + public boolean isShowOverSize(ShowInterface show, AllocationInterface alloc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, Integer.class, + show.getShowId(), alloc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return false; + } } - } - - public boolean isShowOverSize(VirtualProc proc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, Integer.class, proc.getShowId(), - proc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; + + public boolean isShowOverSize(VirtualProc proc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, Integer.class, + proc.getShowId(), proc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return false; + } + } + + private static final String IS_SHOW_AT_OR_OVER_SIZE = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + + "AND " + "s.pk_alloc = ? 
" + "AND " + "s.int_cores >= s.int_size "; + + public boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_SIZE, Integer.class, + show.getShowId(), alloc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return false; + } + } + + private static final String IS_SHOW_OVER_BURST = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + + "AND " + "s.pk_alloc = ? " + "AND " + "s.int_cores + ? > s.int_burst"; + + @Override + public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_OVER_BURST, Integer.class, + show.getShowId(), alloc.getAllocationId(), coreUnits) > 0; + } catch (EmptyResultDataAccessException e) { + return true; + } + } + + private static final String IS_SHOW_AT_OR_OVER_BURST = + "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + + "AND " + "s.pk_alloc = ? " + "AND " + "s.int_cores >= s.int_burst"; + + @Override + public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { + try { + return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_BURST, Integer.class, + show.getShowId(), alloc.getAllocationId()) > 0; + } catch (EmptyResultDataAccessException e) { + return true; + } } - } - - private static final String IS_SHOW_AT_OR_OVER_SIZE = - "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " - + "s.pk_alloc = ? " + "AND " + "s.int_cores >= s.int_size "; - - public boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_SIZE, Integer.class, - show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; + + private static final String GET_SUB = "SELECT " + "subscription.pk_alloc," + + "subscription.pk_show," + "subscription.int_size," + "subscription.int_burst," + + "subscription.pk_subscription," + + "(alloc.str_name || '.' || show.str_name) AS str_name " + "FROM " + "subscription," + + "alloc," + "show," + "facility " + "WHERE " + "subscription.pk_show = show.pk_show " + + "AND " + "subscription.pk_alloc = alloc.pk_alloc " + "AND " + + "alloc.pk_facility = facility.pk_facility "; + + public static RowMapper SUB_MAPPER = new RowMapper() { + public SubscriptionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { + SubscriptionEntity s = new SubscriptionEntity(); + s.allocationId = rs.getString("pk_alloc"); + s.burst = rs.getInt("int_burst"); + s.size = rs.getInt("int_size"); + s.name = rs.getString("str_name"); + s.showId = rs.getString("pk_show"); + s.id = rs.getString("pk_subscription"); + return s; + } + }; + + public SubscriptionEntity getSubscriptionDetail(String id) { + return getJdbcTemplate().queryForObject(GET_SUB + " AND pk_subscription=?", SUB_MAPPER, id); } - } - - private static final String IS_SHOW_OVER_BURST = - "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " - + "s.pk_alloc = ? " + "AND " + "s.int_cores + ? 
> s.int_burst"; - - @Override - public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_BURST, Integer.class, show.getShowId(), - alloc.getAllocationId(), coreUnits) > 0; - } catch (EmptyResultDataAccessException e) { - return true; + + private static final String INSERT_SUBSCRIPTION = "INSERT INTO " + "subscription " + "( " + + "pk_subscription, pk_alloc, pk_show, int_size, int_burst" + ") " + + "VALUES (?,?,?,?,?)"; + + public void insertSubscription(SubscriptionEntity detail) { + detail.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_SUBSCRIPTION, detail.id, detail.allocationId, detail.showId, + detail.size, detail.burst); } - } - - private static final String IS_SHOW_AT_OR_OVER_BURST = - "SELECT " + "COUNT(1) " + "FROM " + "subscription s " + "WHERE " + "s.pk_show = ? " + "AND " - + "s.pk_alloc = ? " + "AND " + "s.int_cores >= s.int_burst"; - - @Override - public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_BURST, Integer.class, - show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return true; + + private static final String HAS_RUNNING_PROCS = "SELECT " + "COUNT(1) " + "FROM " + + "subscription s " + "WHERE " + "s.pk_subscription=? " + "AND " + "s.int_cores > 0 "; + + public boolean hasRunningProcs(SubscriptionInterface sub) { + try { + return getJdbcTemplate().queryForObject(HAS_RUNNING_PROCS, Integer.class, + sub.getSubscriptionId()) > 0; + } catch (DataAccessException e) { + return false; + } } - } - - private static final String GET_SUB = "SELECT " + "subscription.pk_alloc," - + "subscription.pk_show," + "subscription.int_size," + "subscription.int_burst," - + "subscription.pk_subscription," + "(alloc.str_name || '.' 
|| show.str_name) AS str_name " - + "FROM " + "subscription," + "alloc," + "show," + "facility " + "WHERE " - + "subscription.pk_show = show.pk_show " + "AND " + "subscription.pk_alloc = alloc.pk_alloc " - + "AND " + "alloc.pk_facility = facility.pk_facility "; - - public static RowMapper SUB_MAPPER = new RowMapper() { - public SubscriptionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - SubscriptionEntity s = new SubscriptionEntity(); - s.allocationId = rs.getString("pk_alloc"); - s.burst = rs.getInt("int_burst"); - s.size = rs.getInt("int_size"); - s.name = rs.getString("str_name"); - s.showId = rs.getString("pk_show"); - s.id = rs.getString("pk_subscription"); - return s; + + public void deleteSubscription(SubscriptionInterface sub) { + if (hasRunningProcs(sub)) { + throw new EntityModificationError( + "You cannot delete a subscription with running procs"); + } + getJdbcTemplate().update("DELETE FROM subscription WHERE pk_subscription=?", + sub.getSubscriptionId()); } - }; - - public SubscriptionEntity getSubscriptionDetail(String id) { - return getJdbcTemplate().queryForObject(GET_SUB + " AND pk_subscription=?", SUB_MAPPER, id); - } - - private static final String INSERT_SUBSCRIPTION = "INSERT INTO " + "subscription " + "( " - + "pk_subscription, pk_alloc, pk_show, int_size, int_burst" + ") " + "VALUES (?,?,?,?,?)"; - - public void insertSubscription(SubscriptionEntity detail) { - detail.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SUBSCRIPTION, detail.id, detail.allocationId, detail.showId, - detail.size, detail.burst); - } - - private static final String HAS_RUNNING_PROCS = "SELECT " + "COUNT(1) " + "FROM " - + "subscription s " + "WHERE " + "s.pk_subscription=? " + "AND " + "s.int_cores > 0 "; - - public boolean hasRunningProcs(SubscriptionInterface sub) { - try { - return getJdbcTemplate().queryForObject(HAS_RUNNING_PROCS, Integer.class, - sub.getSubscriptionId()) > 0; - } catch (DataAccessException e) { - return false; + + public void updateSubscriptionSize(SubscriptionInterface sub, int size) { + getJdbcTemplate().update("UPDATE subscription SET int_size=? WHERE pk_subscription=?", size, + sub.getSubscriptionId()); } - } - public void deleteSubscription(SubscriptionInterface sub) { - if (hasRunningProcs(sub)) { - throw new EntityModificationError("You cannot delete a subscription with running procs"); + public void updateSubscriptionBurst(SubscriptionInterface sub, int size) { + getJdbcTemplate().update("UPDATE subscription SET int_burst=? WHERE pk_subscription=?", + size, sub.getSubscriptionId()); } - getJdbcTemplate().update("DELETE FROM subscription WHERE pk_subscription=?", - sub.getSubscriptionId()); - } - - public void updateSubscriptionSize(SubscriptionInterface sub, int size) { - getJdbcTemplate().update("UPDATE subscription SET int_size=? WHERE pk_subscription=?", size, - sub.getSubscriptionId()); - } - - public void updateSubscriptionBurst(SubscriptionInterface sub, int size) { - getJdbcTemplate().update("UPDATE subscription SET int_burst=? 
WHERE pk_subscription=?", size, - sub.getSubscriptionId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java index 330151240..2cc46757c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/TaskDaoJdbc.java @@ -37,160 +37,161 @@ */ public class TaskDaoJdbc extends JdbcDaoSupport implements TaskDao { - @Override - public void deleteTasks(PointInterface cdept) { - getJdbcTemplate().update("DELETE FROM task WHERE pk_point=?", cdept.getPointId()); - } - - @Override - public void deleteTasks(ShowInterface show, DepartmentInterface dept) { - getJdbcTemplate().update("DELETE FROM task WHERE pk_show=? AND pk_dept=?", show.getShowId(), - dept.getDepartmentId()); - } - - @Override - public void deleteTask(TaskInterface task) { - getJdbcTemplate().update("DELETE FROM task WHERE pk_task=?", task.getId()); - } - - @Override - public boolean isManaged(TaskInterface t) { - try { - return getJdbcTemplate().queryForObject( - "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", Integer.class, t.getShowId(), - t.getDepartmentId()) == 1; - } catch (org.springframework.dao.DataRetrievalFailureException e) { - return false; + @Override + public void deleteTasks(PointInterface cdept) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_point=?", cdept.getPointId()); } - } - - private static final String INSERT_TASK = "INSERT INTO " + "task " + "( " + "pk_task," - + "pk_point," + "str_shot," + "int_min_cores" + ") " + "VALUES (?,?,?,?)"; - - @Override - public void insertTask(TaskEntity task) { - task.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_TASK, task.id, task.getPointId(), task.shot, task.minCoreUnits); - } - - private static final String GET_TASK_DETAIL = - "SELECT " + "point.pk_dept," + "point.pk_show," + "point.pk_point," + "task.pk_task," - + "task.int_min_cores + task.int_adjust_cores AS int_min_cores," + "task.str_shot," - + "(task.str_shot || '.' || dept.str_name) AS str_name " + "FROM " + "point," + "task," - + "dept, " + "show " + "WHERE " + "point.pk_dept = dept.pk_dept " + "AND " - + "point.pk_show = show.pk_show " + "AND " + "point.pk_point = task.pk_point "; - - public static final RowMapper TASK_DETAIL_MAPPER = new RowMapper() { - public TaskEntity mapRow(ResultSet rs, int row) throws SQLException { - TaskEntity t = new TaskEntity(); - t.pointId = rs.getString("pk_point"); - t.deptId = rs.getString("pk_dept"); - t.showId = rs.getString("pk_show"); - t.id = rs.getString("pk_task"); - t.minCoreUnits = rs.getInt("int_min_cores"); - t.name = rs.getString("str_name"); - t.shot = rs.getString("str_shot"); - return t; + + @Override + public void deleteTasks(ShowInterface show, DepartmentInterface dept) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_show=? AND pk_dept=?", show.getShowId(), + dept.getDepartmentId()); } - }; - - @Override - public TaskEntity getTaskDetail(String id) { - return getJdbcTemplate().queryForObject(GET_TASK_DETAIL + " AND task.pk_task=?", - TASK_DETAIL_MAPPER, id); - } - - @Override - public TaskEntity getTaskDetail(DepartmentInterface d, String shot) { - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND point.pk_dept = ? 
AND task.str_shot = ?", TASK_DETAIL_MAPPER, - d.getDepartmentId(), shot); - } - - @Override - public TaskEntity getTaskDetail(JobInterface j) { - Map map = getJdbcTemplate() - .queryForMap("SELECT pk_dept, str_shot FROM job WHERE job.pk_job=?", j.getJobId()); - - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND task.str_shot = ? AND point.pk_dept = ?", TASK_DETAIL_MAPPER, - map.get("str_shot").toString(), map.get("pk_dept").toString()); - } - - public void updateTaskMinCores(TaskInterface t, int value) { - if (value < 0) { - throw new IllegalArgumentException("min cores must be greater than or equal to 0"); + + @Override + public void deleteTask(TaskInterface task) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_task=?", task.getId()); } - getJdbcTemplate().update("UPDATE task SET int_min_cores=? WHERE pk_task=?", value, - t.getTaskId()); - } - - @Override - public void adjustTaskMinCores(TaskInterface t, int value) { - if (value < 0) { - throw new IllegalArgumentException("min cores must be greater than or equal to 0"); + + @Override + public boolean isManaged(TaskInterface t) { + try { + return getJdbcTemplate().queryForObject( + "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", Integer.class, + t.getShowId(), t.getDepartmentId()) == 1; + } catch (org.springframework.dao.DataRetrievalFailureException e) { + return false; + } } - getJdbcTemplate().update("UPDATE task SET int_adjust_cores = ? - int_min_cores WHERE pk_task=?", - value, t.getTaskId()); - } - - @Override - public void mergeTask(TaskEntity t) { - String pkTask = null; - try { - pkTask = - getJdbcTemplate() - .queryForObject( - "SELECT task.pk_task FROM task, point WHERE task.pk_point = point.pk_point AND " - + "task.str_shot = ? AND point.pk_point=?", - String.class, t.shot, t.getPointId()); - - } catch (EmptyResultDataAccessException dae) { - // Eat this, its possible that no task exists + + private static final String INSERT_TASK = "INSERT INTO " + "task " + "( " + "pk_task," + + "pk_point," + "str_shot," + "int_min_cores" + ") " + "VALUES (?,?,?,?)"; + + @Override + public void insertTask(TaskEntity task) { + task.id = SqlUtil.genKeyRandom(); + getJdbcTemplate().update(INSERT_TASK, task.id, task.getPointId(), task.shot, + task.minCoreUnits); } - // No need to do anything with this task. - if (pkTask == null && t.minCoreUnits == 0) { - return; + private static final String GET_TASK_DETAIL = "SELECT " + "point.pk_dept," + "point.pk_show," + + "point.pk_point," + "task.pk_task," + + "task.int_min_cores + task.int_adjust_cores AS int_min_cores," + "task.str_shot," + + "(task.str_shot || '.' 
|| dept.str_name) AS str_name " + "FROM " + "point," + "task," + + "dept, " + "show " + "WHERE " + "point.pk_dept = dept.pk_dept " + "AND " + + "point.pk_show = show.pk_show " + "AND " + "point.pk_point = task.pk_point "; + + public static final RowMapper TASK_DETAIL_MAPPER = new RowMapper() { + public TaskEntity mapRow(ResultSet rs, int row) throws SQLException { + TaskEntity t = new TaskEntity(); + t.pointId = rs.getString("pk_point"); + t.deptId = rs.getString("pk_dept"); + t.showId = rs.getString("pk_show"); + t.id = rs.getString("pk_task"); + t.minCoreUnits = rs.getInt("int_min_cores"); + t.name = rs.getString("str_name"); + t.shot = rs.getString("str_shot"); + return t; + } + }; + + @Override + public TaskEntity getTaskDetail(String id) { + return getJdbcTemplate().queryForObject(GET_TASK_DETAIL + " AND task.pk_task=?", + TASK_DETAIL_MAPPER, id); } - if (t.minCoreUnits == 0) { - getJdbcTemplate().update("DELETE FROM task WHERE pk_point=? AND str_shot=? ", t.getPointId(), - t.shot); - } else if (getJdbcTemplate().update( - "UPDATE task SET int_min_cores=? WHERE pk_point=? AND str_shot=?", t.minCoreUnits, - t.getPointId(), t.shot) == 0) { - try { - insertTask(t); - } catch (org.springframework.dao.DataIntegrityViolationException e) { - logger.warn("error inserting task " + t.shot + "," + e); - } + @Override + public TaskEntity getTaskDetail(DepartmentInterface d, String shot) { + return getJdbcTemplate().queryForObject( + GET_TASK_DETAIL + " AND point.pk_dept = ? AND task.str_shot = ?", + TASK_DETAIL_MAPPER, d.getDepartmentId(), shot); } - } - private static final String CLEAR_TASK_ADJUSTMENTS = "UPDATE " + "task " + "SET " - + "int_adjust_cores = 0 " + "WHERE " + "pk_show=? " + "AND " + "pk_dept = ? "; + @Override + public TaskEntity getTaskDetail(JobInterface j) { + Map map = getJdbcTemplate() + .queryForMap("SELECT pk_dept, str_shot FROM job WHERE job.pk_job=?", j.getJobId()); - @Override - public void clearTaskAdjustments(PointInterface cdept) { - getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENTS, cdept.getShowId(), cdept.getDepartmentId()); - } + return getJdbcTemplate().queryForObject( + GET_TASK_DETAIL + " AND task.str_shot = ? AND point.pk_dept = ?", + TASK_DETAIL_MAPPER, map.get("str_shot").toString(), map.get("pk_dept").toString()); + } - private static final String CLEAR_TASK_ADJUSTMENT = - "UPDATE " + "task " + "SET " + "int_adjust_cores = 0 " + "WHERE " + "pk_task=?"; + public void updateTaskMinCores(TaskInterface t, int value) { + if (value < 0) { + throw new IllegalArgumentException("min cores must be greater than or equal to 0"); + } + getJdbcTemplate().update("UPDATE task SET int_min_cores=? WHERE pk_task=?", value, + t.getTaskId()); + } - @Override - public void clearTaskAdjustment(TaskInterface t) { - getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENT, t.getTaskId()); - } + @Override + public void adjustTaskMinCores(TaskInterface t, int value) { + if (value < 0) { + throw new IllegalArgumentException("min cores must be greater than or equal to 0"); + } + getJdbcTemplate().update( + "UPDATE task SET int_adjust_cores = ? 
- int_min_cores WHERE pk_task=?", value, + t.getTaskId()); + } - private static final String IS_JOB_MANAGED = "SELECT " + "COUNT(1) " + "FROM " + "job," + "task," - + "point " + "WHERE " + "job.pk_show = point.pk_show " + "AND " - + "job.pk_dept = point.pk_dept " + "AND " + "task.pk_point = point.pk_point " + "AND " - + "task.str_shot = job.str_shot " + "AND " + "job.pk_job = ?"; + @Override + public void mergeTask(TaskEntity t) { + String pkTask = null; + try { + pkTask = getJdbcTemplate().queryForObject( + "SELECT task.pk_task FROM task, point WHERE task.pk_point = point.pk_point AND " + + "task.str_shot = ? AND point.pk_point=?", + String.class, t.shot, t.getPointId()); + + } catch (EmptyResultDataAccessException dae) { + // Eat this, its possible that no task exists + } + + // No need to do anything with this task. + if (pkTask == null && t.minCoreUnits == 0) { + return; + } + + if (t.minCoreUnits == 0) { + getJdbcTemplate().update("DELETE FROM task WHERE pk_point=? AND str_shot=? ", + t.getPointId(), t.shot); + } else if (getJdbcTemplate().update( + "UPDATE task SET int_min_cores=? WHERE pk_point=? AND str_shot=?", t.minCoreUnits, + t.getPointId(), t.shot) == 0) { + try { + insertTask(t); + } catch (org.springframework.dao.DataIntegrityViolationException e) { + logger.warn("error inserting task " + t.shot + "," + e); + } + } + } - @Override - public boolean isManaged(JobInterface j) { - return getJdbcTemplate().queryForObject(IS_JOB_MANAGED, Integer.class, j.getJobId()) > 0; - } + private static final String CLEAR_TASK_ADJUSTMENTS = "UPDATE " + "task " + "SET " + + "int_adjust_cores = 0 " + "WHERE " + "pk_show=? " + "AND " + "pk_dept = ? "; + + @Override + public void clearTaskAdjustments(PointInterface cdept) { + getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENTS, cdept.getShowId(), + cdept.getDepartmentId()); + } + + private static final String CLEAR_TASK_ADJUSTMENT = + "UPDATE " + "task " + "SET " + "int_adjust_cores = 0 " + "WHERE " + "pk_task=?"; + + @Override + public void clearTaskAdjustment(TaskInterface t) { + getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENT, t.getTaskId()); + } + + private static final String IS_JOB_MANAGED = "SELECT " + "COUNT(1) " + "FROM " + "job," + + "task," + "point " + "WHERE " + "job.pk_show = point.pk_show " + "AND " + + "job.pk_dept = point.pk_dept " + "AND " + "task.pk_point = point.pk_point " + "AND " + + "task.str_shot = job.str_shot " + "AND " + "job.pk_job = ?"; + + @Override + public boolean isManaged(JobInterface j) { + return getJdbcTemplate().queryForObject(IS_JOB_MANAGED, Integer.class, j.getJobId()) > 0; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java index 31338b5a0..aff8c9584 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java @@ -126,1557 +126,1658 @@ import com.imageworks.spcue.util.SqlUtil; public class WhiteboardDaoJdbc extends JdbcDaoSupport implements WhiteboardDao { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(WhiteboardDaoJdbc.class); - - private FrameSearchFactory frameSearchFactory; - private ProcSearchFactory procSearchFactory; - - @Override - public Service getService(String id) { - return getJdbcTemplate().queryForObject(GET_SERVICE + " WHERE (pk_service=? 
or str_name=?)", - SERVICE_MAPPER, id, id); - } - - @Override - public Service findService(String name) { - return getJdbcTemplate().queryForObject(GET_SERVICE + " WHERE service.str_name=?", - SERVICE_MAPPER, name); - } - - @Override - public ServiceSeq getDefaultServices() { - List services = getJdbcTemplate().query(GET_SERVICE, SERVICE_MAPPER); - return ServiceSeq.newBuilder().addAllServices(services).build(); - } - - @Override - public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { - return ServiceOverrideSeq.newBuilder() - .addAllServiceOverrides( - getJdbcTemplate().query(GET_SERVICE_OVERRIDE + " AND show_service.pk_show = ?", - SERVICE_OVERRIDE_MAPPER, show.getId())) - .build(); - } - - @Override - public ServiceOverride getServiceOverride(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_SERVICE_OVERRIDE + " AND show_service.pk_show=? AND (show_service.str_name=? OR" - + " show_service.pk_show_service=?)", - SERVICE_OVERRIDE_MAPPER, show.getId(), name, name); - } - - @Override - public Filter getFilter(FilterInterface filter) { - return getJdbcTemplate().queryForObject(GET_FILTER + " AND pk_filter=?", FILTER_MAPPER, - filter.getFilterId()); - } - - @Override - public Filter findFilter(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " AND filter.pk_show=? AND filter.str_name=?", FILTER_MAPPER, show.getShowId(), - name); - } - - @Override - public Filter findFilter(String show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " AND show.str_name=? AND filter.str_name=?", FILTER_MAPPER, show, name); - } - - @Override - public FilterSeq getFilters(ShowInterface show) { - return FilterSeq.newBuilder() - .addAllFilters( - getJdbcTemplate().query(GET_FILTER + " AND show.pk_show=? ORDER BY f_order ASC", - FILTER_MAPPER, show.getShowId())) - .build(); - } - - @Override - public ActionSeq getActions(FilterInterface filter) { - return ActionSeq.newBuilder() - .addAllActions(getJdbcTemplate().query( - GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC ", - ACTION_MAPPER, filter.getFilterId())) - .build(); - } - - @Override - public MatcherSeq getMatchers(FilterInterface filter) { - return MatcherSeq.newBuilder() - .addAllMatchers( - getJdbcTemplate().query(GET_MATCHER + " AND filter.pk_filter=? 
ORDER BY ts_created ASC", - MATCHER_MAPPER, filter.getFilterId())) - .build(); - } - - @Override - public Action getAction(ActionInterface action) { - return getJdbcTemplate().queryForObject(GET_ACTION + " AND action.pk_action=?", ACTION_MAPPER, - action.getActionId()); - } - - @Override - public Matcher getMatcher(MatcherInterface matcher) { - return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_MAPPER, matcher.getMatcherId()); - } - - @Override - public Show getShow(String id) { - return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.pk_show=?", SHOW_MAPPER, id); - } - - @Override - public ShowSeq getShows() { - List shows = getJdbcTemplate().query(GET_SHOW, SHOW_MAPPER); - return ShowSeq.newBuilder().addAllShows(shows).build(); - } - - @Override - public ShowSeq getActiveShows() { - List shows = getJdbcTemplate().query(GET_SHOW + " AND b_active=?", SHOW_MAPPER, true); - return ShowSeq.newBuilder().addAllShows(shows).build(); - } - - @Override - public Show findShow(String name) { - return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.str_name=?", SHOW_MAPPER, name); - } - - @Override - public Subscription getSubscription(String id) { - return getJdbcTemplate().queryForObject( - GET_SUBSCRIPTION + " AND subscription.pk_subscription=?", SUBSCRIPTION_MAPPER, id); - } - - @Override - public Subscription findSubscription(String show, String alloc) { - return getJdbcTemplate().queryForObject( - GET_SUBSCRIPTION + " AND show.str_name=? AND alloc.str_name=?", SUBSCRIPTION_MAPPER, show, - alloc); - } - - @Override - public SubscriptionSeq getSubscriptions(ShowInterface show) { - List subscriptions = getJdbcTemplate() - .query(GET_SUBSCRIPTION + " AND show.pk_show=?", SUBSCRIPTION_MAPPER, show.getShowId()); - return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); - } - - @Override - public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { - List subscriptions = - getJdbcTemplate().query(GET_SUBSCRIPTION + " AND subscription.pk_alloc=?", - SUBSCRIPTION_MAPPER, alloc.getAllocationId()); - return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); - } - - @Override - public Allocation findAllocation(String name) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", - ALLOCATION_MAPPER, name); - } - - @Override - public Allocation getAllocation(String id) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.pk_alloc=?", - ALLOCATION_MAPPER, id); - } - - @Override - public AllocationSeq getAllocations() { - return AllocationSeq.newBuilder().addAllAllocations( - getJdbcTemplate().query(GET_ALLOCATION + " ORDER BY alloc.str_name ", ALLOCATION_MAPPER)) - .build(); - } - - @Override - public AllocationSeq getAllocations(com.imageworks.spcue.FacilityInterface facility) { - return AllocationSeq.newBuilder() - .addAllAllocations(getJdbcTemplate().query(GET_ALLOCATION + " AND alloc.pk_facility = ?", - ALLOCATION_MAPPER, facility.getFacilityId())) - .build(); - } - - @Override - public JobSeq getJobs(GroupInterface group) { - List jobs = - getJdbcTemplate().query(GET_PENDING_JOBS + " AND job.pk_folder=? 
ORDER BY job.str_name ASC", - JOB_MAPPER, group.getId()); - return JobSeq.newBuilder().addAllJobs(jobs).build(); - } - - @Override - public List getJobNames(JobSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_JOB_NAMES), new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString(1); - } - }, r.getValuesArray()); - } - - @Override - public JobSeq getJobs(JobSearchInterface r) { - List jobs = getJdbcTemplate().query( - r.getFilteredQuery(GET_JOB) + "ORDER BY job.str_name ASC", JOB_MAPPER, r.getValuesArray()); - return JobSeq.newBuilder().addAllJobs(jobs).build(); - } - - @Override - public Job findJob(String name) { - return getJdbcTemplate().queryForObject(GET_PENDING_JOBS + " AND job.str_name=?", JOB_MAPPER, - name.toLowerCase()); - } - - @Override - public Job getJob(String id) { - return getJdbcTemplate().queryForObject(GET_JOB + " AND job.pk_job=?", JOB_MAPPER, id); - } - - @Override - public Layer getLayer(String id) { - return getJdbcTemplate().queryForObject(GET_LAYER_WITH_LIMITS + " WHERE layer.pk_layer=?", - LAYER_MAPPER, id); - } - - @Override - public Layer findLayer(String job, String layer) { - return getJdbcTemplate().queryForObject( - GET_LAYER_WITH_LIMITS - + " WHERE job.str_state='PENDING' AND job.str_name=? AND layer.str_name=?", - LAYER_MAPPER, job, layer); - } - - @Override - public LayerSeq getLayers(JobInterface job) { - String query = - GET_LAYER_WITH_LIMITS + " WHERE layer.pk_job=? ORDER BY layer.int_dispatch_order ASC"; - List layers = getJdbcTemplate().query(query, LAYER_MAPPER, job.getJobId()); - return LayerSeq.newBuilder().addAllLayers(layers).build(); - } - - public Layer addLimitNames(Layer layer) { - return layer.toBuilder().addAllLimits(getLimitNames(layer.getId())).build(); - } - - public List getLimitNames(String layerId) { - return getJdbcTemplate().query(GET_LIMIT_NAMES, LIMIT_NAME_MAPPER, layerId); - } - - @Override - public List getLimits(LayerInterface layer) { - List limits = - getJdbcTemplate().query(GET_LIMIT_FROM_LAYER_ID, LIMIT_MAPPER, layer.getLayerId()); - return limits; - } - - @Override - public GroupSeq getGroups(ShowInterface show) { - List groups = getJdbcTemplate().query( - GET_GROUPS - + " AND folder.pk_show=? ORDER BY folder_level.int_level ASC, folder.str_name ASC ", - GROUP_MAPPER, show.getShowId()); - return GroupSeq.newBuilder().addAllGroups(groups).build(); - } - - @Override - public GroupSeq getGroups(GroupInterface group) { - List groups = getJdbcTemplate().query(GET_GROUPS - + " AND folder.pk_parent_folder=? ORDER BY folder_level.int_level ASC, folder.f_order DESC, folder.str_name ASC ", - GROUP_MAPPER, group.getGroupId()); - return GroupSeq.newBuilder().addAllGroups(groups).build(); - } - - @Override - public Group getGroup(String id) { - return getJdbcTemplate().queryForObject(GET_GROUPS + " AND folder.pk_folder=?", GROUP_MAPPER, - id); - } - - @Override - public Group getRootGroup(ShowInterface show) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND show.pk_show=? 
AND folder.b_default=?", GROUP_MAPPER, show.getShowId(), - true); - } - - @Override - public Frame findFrame(String job, String layer, int frame) { - return getJdbcTemplate().queryForObject(FIND_FRAME, FRAME_MAPPER, job, layer, frame); - } - - @Override - public Frame getFrame(String id) { - return getJdbcTemplate().queryForObject(GET_FRAME + " AND frame.pk_frame=?", FRAME_MAPPER, id); - } - - @Override - public FrameSeq getFrames(FrameSearchInterface r) { - List frames = getJdbcTemplate().query(r.getSortedQuery(GET_FRAMES_CRITERIA), - FRAME_MAPPER, r.getValuesArray()); - return FrameSeq.newBuilder().addAllFrames(frames).build(); - } - - @Override - public Depend getDepend(DependInterface depend) { - return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, - depend.getId()); - } - - @Override - public Depend getDepend(com.imageworks.spcue.depend.AbstractDepend depend) { - return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, - depend.getId()); - } - - @Override - public DependSeq getWhatDependsOnThis(JobInterface job) { - List depends = - getJdbcTemplate().query(GET_DEPEND + " WHERE pk_parent IS NULL AND pk_job_depend_on=?", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatDependsOnThis(LayerInterface layer) { - List depends = - getJdbcTemplate().query(GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_on=?", - DEPEND_MAPPER, layer.getLayerId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - - } - - @Override - public DependSeq getWhatDependsOnThis(FrameInterface frame) { - List depends = getJdbcTemplate().query(GET_DEPEND + " WHERE pk_frame_depend_on=?", - DEPEND_MAPPER, frame.getFrameId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(JobInterface job) { - List depends = - getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er IS NULL AND " - + "pk_frame_depend_er IS NULL AND pk_job_depend_er=?", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(LayerInterface layer) { - List depends = - getJdbcTemplate().query(GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er=?", - DEPEND_MAPPER, layer.getLayerId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(FrameInterface frame) { + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(WhiteboardDaoJdbc.class); + + private FrameSearchFactory frameSearchFactory; + private ProcSearchFactory procSearchFactory; + + @Override + public Service getService(String id) { + return getJdbcTemplate().queryForObject(GET_SERVICE + " WHERE (pk_service=? 
or str_name=?)", + SERVICE_MAPPER, id, id); + } + + @Override + public Service findService(String name) { + return getJdbcTemplate().queryForObject(GET_SERVICE + " WHERE service.str_name=?", + SERVICE_MAPPER, name); + } + + @Override + public ServiceSeq getDefaultServices() { + List services = getJdbcTemplate().query(GET_SERVICE, SERVICE_MAPPER); + return ServiceSeq.newBuilder().addAllServices(services).build(); + } + + @Override + public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { + return ServiceOverrideSeq.newBuilder() + .addAllServiceOverrides(getJdbcTemplate().query( + GET_SERVICE_OVERRIDE + " AND show_service.pk_show = ?", + SERVICE_OVERRIDE_MAPPER, show.getId())) + .build(); + } + + @Override + public ServiceOverride getServiceOverride(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject( + GET_SERVICE_OVERRIDE + " AND show_service.pk_show=? AND (show_service.str_name=? OR" + + " show_service.pk_show_service=?)", + SERVICE_OVERRIDE_MAPPER, show.getId(), name, name); + } + + @Override + public Filter getFilter(FilterInterface filter) { + return getJdbcTemplate().queryForObject(GET_FILTER + " AND pk_filter=?", FILTER_MAPPER, + filter.getFilterId()); + } + + @Override + public Filter findFilter(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject( + GET_FILTER + " AND filter.pk_show=? AND filter.str_name=?", FILTER_MAPPER, + show.getShowId(), name); + } + + @Override + public Filter findFilter(String show, String name) { + return getJdbcTemplate().queryForObject( + GET_FILTER + " AND show.str_name=? AND filter.str_name=?", FILTER_MAPPER, show, + name); + } + + @Override + public FilterSeq getFilters(ShowInterface show) { + return FilterSeq.newBuilder() + .addAllFilters(getJdbcTemplate().query( + GET_FILTER + " AND show.pk_show=? ORDER BY f_order ASC", FILTER_MAPPER, + show.getShowId())) + .build(); + } + + @Override + public ActionSeq getActions(FilterInterface filter) { + return ActionSeq.newBuilder() + .addAllActions(getJdbcTemplate().query( + GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC ", + ACTION_MAPPER, filter.getFilterId())) + .build(); + } + + @Override + public MatcherSeq getMatchers(FilterInterface filter) { + return MatcherSeq.newBuilder() + .addAllMatchers(getJdbcTemplate().query( + GET_MATCHER + " AND filter.pk_filter=? 
ORDER BY ts_created ASC", + MATCHER_MAPPER, filter.getFilterId())) + .build(); + } + + @Override + public Action getAction(ActionInterface action) { + return getJdbcTemplate().queryForObject(GET_ACTION + " AND action.pk_action=?", + ACTION_MAPPER, action.getActionId()); + } + + @Override + public Matcher getMatcher(MatcherInterface matcher) { + return getJdbcTemplate().queryForObject(GET_MATCHER + " AND matcher.pk_matcher=?", + MATCHER_MAPPER, matcher.getMatcherId()); + } + + @Override + public Show getShow(String id) { + return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.pk_show=?", SHOW_MAPPER, id); + } + + @Override + public ShowSeq getShows() { + List shows = getJdbcTemplate().query(GET_SHOW, SHOW_MAPPER); + return ShowSeq.newBuilder().addAllShows(shows).build(); + } + + @Override + public ShowSeq getActiveShows() { + List shows = getJdbcTemplate().query(GET_SHOW + " AND b_active=?", SHOW_MAPPER, true); + return ShowSeq.newBuilder().addAllShows(shows).build(); + } + + @Override + public Show findShow(String name) { + return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.str_name=?", SHOW_MAPPER, + name); + } + + @Override + public Subscription getSubscription(String id) { + return getJdbcTemplate().queryForObject( + GET_SUBSCRIPTION + " AND subscription.pk_subscription=?", SUBSCRIPTION_MAPPER, id); + } + + @Override + public Subscription findSubscription(String show, String alloc) { + return getJdbcTemplate().queryForObject( + GET_SUBSCRIPTION + " AND show.str_name=? AND alloc.str_name=?", SUBSCRIPTION_MAPPER, + show, alloc); + } + + @Override + public SubscriptionSeq getSubscriptions(ShowInterface show) { + List subscriptions = getJdbcTemplate().query( + GET_SUBSCRIPTION + " AND show.pk_show=?", SUBSCRIPTION_MAPPER, show.getShowId()); + return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); + } + + @Override + public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { + List subscriptions = + getJdbcTemplate().query(GET_SUBSCRIPTION + " AND subscription.pk_alloc=?", + SUBSCRIPTION_MAPPER, alloc.getAllocationId()); + return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); + } + + @Override + public Allocation findAllocation(String name) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.str_name=?", + ALLOCATION_MAPPER, name); + } + + @Override + public Allocation getAllocation(String id) { + return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.pk_alloc=?", + ALLOCATION_MAPPER, id); + } + + @Override + public AllocationSeq getAllocations() { + return AllocationSeq.newBuilder() + .addAllAllocations(getJdbcTemplate() + .query(GET_ALLOCATION + " ORDER BY alloc.str_name ", ALLOCATION_MAPPER)) + .build(); + } + + @Override + public AllocationSeq getAllocations(com.imageworks.spcue.FacilityInterface facility) { + return AllocationSeq.newBuilder() + .addAllAllocations( + getJdbcTemplate().query(GET_ALLOCATION + " AND alloc.pk_facility = ?", + ALLOCATION_MAPPER, facility.getFacilityId())) + .build(); + } + + @Override + public JobSeq getJobs(GroupInterface group) { + List jobs = getJdbcTemplate().query( + GET_PENDING_JOBS + " AND job.pk_folder=? 
ORDER BY job.str_name ASC", JOB_MAPPER, + group.getId()); + return JobSeq.newBuilder().addAllJobs(jobs).build(); + } + + @Override + public List getJobNames(JobSearchInterface r) { + return getJdbcTemplate().query(r.getFilteredQuery(GET_JOB_NAMES), new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString(1); + } + }, r.getValuesArray()); + } + + @Override + public JobSeq getJobs(JobSearchInterface r) { + List jobs = + getJdbcTemplate().query(r.getFilteredQuery(GET_JOB) + "ORDER BY job.str_name ASC", + JOB_MAPPER, r.getValuesArray()); + return JobSeq.newBuilder().addAllJobs(jobs).build(); + } + + @Override + public Job findJob(String name) { + return getJdbcTemplate().queryForObject(GET_PENDING_JOBS + " AND job.str_name=?", + JOB_MAPPER, name.toLowerCase()); + } + + @Override + public Job getJob(String id) { + return getJdbcTemplate().queryForObject(GET_JOB + " AND job.pk_job=?", JOB_MAPPER, id); + } + + @Override + public Layer getLayer(String id) { + return getJdbcTemplate().queryForObject(GET_LAYER_WITH_LIMITS + " WHERE layer.pk_layer=?", + LAYER_MAPPER, id); + } + + @Override + public Layer findLayer(String job, String layer) { + return getJdbcTemplate().queryForObject( + GET_LAYER_WITH_LIMITS + + " WHERE job.str_state='PENDING' AND job.str_name=? AND layer.str_name=?", + LAYER_MAPPER, job, layer); + } + + @Override + public LayerSeq getLayers(JobInterface job) { + String query = GET_LAYER_WITH_LIMITS + + " WHERE layer.pk_job=? ORDER BY layer.int_dispatch_order ASC"; + List layers = getJdbcTemplate().query(query, LAYER_MAPPER, job.getJobId()); + return LayerSeq.newBuilder().addAllLayers(layers).build(); + } + + public Layer addLimitNames(Layer layer) { + return layer.toBuilder().addAllLimits(getLimitNames(layer.getId())).build(); + } + + public List getLimitNames(String layerId) { + return getJdbcTemplate().query(GET_LIMIT_NAMES, LIMIT_NAME_MAPPER, layerId); + } + + @Override + public List getLimits(LayerInterface layer) { + List limits = + getJdbcTemplate().query(GET_LIMIT_FROM_LAYER_ID, LIMIT_MAPPER, layer.getLayerId()); + return limits; + } + + @Override + public GroupSeq getGroups(ShowInterface show) { + List groups = getJdbcTemplate().query(GET_GROUPS + + " AND folder.pk_show=? ORDER BY folder_level.int_level ASC, folder.str_name ASC ", + GROUP_MAPPER, show.getShowId()); + return GroupSeq.newBuilder().addAllGroups(groups).build(); + } + + @Override + public GroupSeq getGroups(GroupInterface group) { + List groups = getJdbcTemplate().query(GET_GROUPS + + " AND folder.pk_parent_folder=? ORDER BY folder_level.int_level ASC, folder.f_order DESC, folder.str_name ASC ", + GROUP_MAPPER, group.getGroupId()); + return GroupSeq.newBuilder().addAllGroups(groups).build(); + } + + @Override + public Group getGroup(String id) { + return getJdbcTemplate().queryForObject(GET_GROUPS + " AND folder.pk_folder=?", + GROUP_MAPPER, id); + } + + @Override + public Group getRootGroup(ShowInterface show) { + return getJdbcTemplate().queryForObject( + GET_GROUPS + " AND show.pk_show=? 
AND folder.b_default=?", GROUP_MAPPER, + show.getShowId(), true); + } + + @Override + public Frame findFrame(String job, String layer, int frame) { + return getJdbcTemplate().queryForObject(FIND_FRAME, FRAME_MAPPER, job, layer, frame); + } + + @Override + public Frame getFrame(String id) { + return getJdbcTemplate().queryForObject(GET_FRAME + " AND frame.pk_frame=?", FRAME_MAPPER, + id); + } + + @Override + public FrameSeq getFrames(FrameSearchInterface r) { + List frames = getJdbcTemplate().query(r.getSortedQuery(GET_FRAMES_CRITERIA), + FRAME_MAPPER, r.getValuesArray()); + return FrameSeq.newBuilder().addAllFrames(frames).build(); + } + + @Override + public Depend getDepend(DependInterface depend) { + return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, + depend.getId()); + } + + @Override + public Depend getDepend(com.imageworks.spcue.depend.AbstractDepend depend) { + return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, + depend.getId()); + } + + @Override + public DependSeq getWhatDependsOnThis(JobInterface job) { + List depends = getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_parent IS NULL AND pk_job_depend_on=?", DEPEND_MAPPER, + job.getJobId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatDependsOnThis(LayerInterface layer) { + List depends = getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_on=?", DEPEND_MAPPER, + layer.getLayerId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + + } + + @Override + public DependSeq getWhatDependsOnThis(FrameInterface frame) { + List depends = getJdbcTemplate().query(GET_DEPEND + " WHERE pk_frame_depend_on=?", + DEPEND_MAPPER, frame.getFrameId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatThisDependsOn(JobInterface job) { + List depends = getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er IS NULL AND " + + "pk_frame_depend_er IS NULL AND pk_job_depend_er=?", + DEPEND_MAPPER, job.getJobId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatThisDependsOn(LayerInterface layer) { + List depends = getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er=?", DEPEND_MAPPER, + layer.getLayerId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getWhatThisDependsOn(FrameInterface frame) { + /* + * This should show anything that is making the frame dependent. + */ + List depends = getJdbcTemplate().query(GET_DEPEND + " WHERE " + + "(pk_job_depend_er=? AND str_type IN ('JOB_ON_JOB','JOB_ON_LAYER','JOB_ON_FRAME')) OR " + + "(pk_layer_depend_er=? AND str_type IN ('LAYER_ON_JOB','LAYER_ON_LAYER','LAYER_ON_FRAME')) " + + "OR (pk_frame_depend_er=?)", DEPEND_MAPPER, frame.getJobId(), frame.getLayerId(), + frame.getFrameId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public DependSeq getDepends(JobInterface job) { + List depends = getJdbcTemplate().query( + GET_DEPEND + " WHERE pk_job_depend_er=? 
AND str_type != 'FRAME_ON_FRAME'", + DEPEND_MAPPER, job.getJobId()); + return DependSeq.newBuilder().addAllDepends(depends).build(); + } + + @Override + public Depend getDepend(String id) { + return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, + id); + } + + @Override + public Group findGroup(String show, String group) { + return getJdbcTemplate().queryForObject( + GET_GROUPS + " AND show.str_name=? AND folder.str_name=?", GROUP_MAPPER, show, + group); + } + + @Override + public Host findHost(String name) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.str_name=?", HOST_MAPPER, + name); + } + + @Override + public HostSeq getHosts(HostSearchInterface r) { + List hosts = getJdbcTemplate().query(r.getFilteredQuery(GET_HOST), HOST_MAPPER, + r.getValuesArray()); + return HostSeq.newBuilder().addAllHosts(hosts).build(); + } + + @Override + public Host getHost(String id) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); + } + + @Override + public ProcSeq getProcs(HostInterface host) { + ProcSearchInterface r = procSearchFactory.create(); + r.filterByHost(host); + r.sortByHostName(); + r.sortByDispatchedTime(); + return ProcSeq.newBuilder().addAllProcs(getProcs(r).getProcsList()).build(); + } + + @Override + public ProcSeq getProcs(ProcSearchInterface p) { + p.sortByHostName(); + p.sortByDispatchedTime(); + List procs = getJdbcTemplate().query(p.getFilteredQuery(GET_PROC), PROC_MAPPER, + p.getValuesArray()); + return ProcSeq.newBuilder().addAllProcs(procs).build(); + } + + @Override + public CommentSeq getComments(HostInterface h) { + List comments = + getJdbcTemplate().query(GET_HOST_COMMENTS, COMMENT_MAPPER, h.getHostId()); + return CommentSeq.newBuilder().addAllComments(comments).build(); + } + + @Override + public CommentSeq getComments(JobInterface j) { + List comments = + getJdbcTemplate().query(GET_JOB_COMMENTS, COMMENT_MAPPER, j.getJobId()); + return CommentSeq.newBuilder().addAllComments(comments).build(); + } + + @Override + public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, + int epochSeconds) { + + long timeDiff = (System.currentTimeMillis() / 1000) - epochSeconds; + if (timeDiff > 60) { + throw new IllegalArgumentException("the last update timestamp cannot be over " + + "a minute off the current time, difference was: " + timeDiff); + } + + UpdatedFrameCheckResult.Builder resultBuilder = UpdatedFrameCheckResult.newBuilder(); + resultBuilder.setState(JobState.valueOf(getJdbcTemplate().queryForObject( + "SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId()))); + + FrameSearchInterface r = frameSearchFactory.create(job); + r.filterByLayers(layers); + r.filterByChangeDate(epochSeconds); + r.setMaxResults(100); + + List updatedFrameList = getJdbcTemplate().query( + r.getFilteredQuery(GET_UPDATED_FRAME), UPDATED_FRAME_MAPPER, r.getValuesArray()); + resultBuilder.setUpdatedFrames( + UpdatedFrameSeq.newBuilder().addAllUpdatedFrames(updatedFrameList).build()); + resultBuilder.setServerTime((int) (System.currentTimeMillis() / 1000) - 1); + + return resultBuilder.build(); + } + + @Override + public Department getDepartment(ShowInterface show, String name) { + return getJdbcTemplate().queryForObject(GET_DEPARTMENT, DEPARTMENT_MAPPER, show.getShowId(), + name); + } + + @Override + public DepartmentSeq getDepartments(ShowInterface show) { + List departments = + getJdbcTemplate().query(GET_DEPARTMENTS, DEPARTMENT_MAPPER, show.getShowId()); + return 
DepartmentSeq.newBuilder().addAllDepartments(departments).build(); + } + + @Override + public List getDepartmentNames() { + return getJdbcTemplate().query("SELECT str_name FROM dept ORDER BY str_name ASC", + new RowMapper() { + public String mapRow(ResultSet rs, int row) throws SQLException { + return rs.getString("str_name"); + } + }); + } + + @Override + public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { + return getJdbcTemplate().queryForObject( + GET_TASK + " AND point.pk_show=? AND point.pk_dept=? AND task.str_shot=?", + TASK_MAPPER, show.getShowId(), dept.getDepartmentId(), shot); + } + + @Override + public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { + if (dept == null) { + return TaskSeq.newBuilder() + .addAllTasks(getJdbcTemplate().query( + GET_TASK + " AND point.pk_show=? ORDER BY task.str_shot", TASK_MAPPER, + show.getShowId())) + .build(); + } else { + return TaskSeq.newBuilder() + .addAllTasks(getJdbcTemplate().query(GET_TASK + + " AND point.pk_show=? AND point.pk_dept=? ORDER BY task.str_shot", + TASK_MAPPER, show.getShowId(), dept.getDepartmentId())) + .build(); + } + } + + @Override + public DeedSeq getDeeds(OwnerEntity owner) { + List deeds = getJdbcTemplate().query(QUERY_FOR_DEED + " AND owner.pk_owner=?", + DEED_MAPPER, owner.getId()); + return DeedSeq.newBuilder().addAllDeeds(deeds).build(); + } + + @Override + public DeedSeq getDeeds(ShowInterface show) { + List deeds = getJdbcTemplate().query(QUERY_FOR_DEED + " AND show.pk_show=?", + DEED_MAPPER, show.getId()); + return DeedSeq.newBuilder().addAllDeeds(deeds).build(); + } + + @Override + public Host getHost(DeedEntity deed) { + return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, + deed.id); + } + + @Override + public Deed getDeed(HostInterface host) { + return getJdbcTemplate().queryForObject(QUERY_FOR_DEED + " AND host.pk_host=?", DEED_MAPPER, + host.getHostId()); + } + + @Override + public HostSeq getHosts(OwnerEntity owner) { + StringBuilder sb = new StringBuilder(4096); + String query = GET_HOST; + query = query.replace("FROM ", "FROM owner, deed,"); + sb.append(query); + sb.append("AND deed.pk_host = host.pk_host "); + sb.append("AND deed.pk_owner = owner.pk_owner "); + sb.append("AND owner.pk_owner = ?"); + + List hosts = getJdbcTemplate().query(sb.toString(), HOST_MAPPER, owner.getId()); + return HostSeq.newBuilder().addAllHosts(hosts).build(); + } + + @Override + public Owner getOwner(DeedEntity deed) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + + "pk_owner = (SELECT deed.pk_owner FROM deed " + "WHERE pk_deed=?)", OWNER_MAPPER, + deed.getId()); + } + + @Override + public Owner getOwner(HostInterface host) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + + "pk_owner = (SELECT deed.pk_owner FROM deed " + "WHERE pk_host=?)", OWNER_MAPPER, + host.getHostId()); + } + + @Override + public List getOwners(ShowInterface show) { + return getJdbcTemplate().query(QUERY_FOR_OWNER + " AND owner.pk_show=?", OWNER_MAPPER, + show.getShowId()); + } + + @Override + public RenderPartition getRenderPartition(LocalHostAssignment l) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_RENDER_PART + "WHERE host_local.pk_host_local = ?", RENDER_PARTION_MAPPER, + l.getId()); + } + + @Override + public RenderPartitionSeq getRenderPartitions(HostInterface host) { + List partitions = + getJdbcTemplate().query(QUERY_FOR_RENDER_PART + "WHERE host_local.pk_host = ?", + RENDER_PARTION_MAPPER, 
host.getHostId()); + return RenderPartitionSeq.newBuilder().addAllRenderPartitions(partitions).build(); + } + + @Override + public Owner getOwner(String name) { + return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + "(" + + "owner.str_username = ? " + "OR " + "owner.pk_owner = ?" + ")", OWNER_MAPPER, + name, name); + } + + @Override + public Facility getFacility(String name) { + return getJdbcTemplate().queryForObject( + QUERY_FOR_FACILITY + " WHERE facility.pk_facility = ? OR facility.str_name = ?", + FACILITY_MAPPER, name, name); + } + + @Override + public FacilitySeq getFacilities() { + return FacilitySeq.newBuilder() + .addAllFacilities(getJdbcTemplate().query(QUERY_FOR_FACILITY, FACILITY_MAPPER)) + .build(); + } + + @Override + public Limit findLimit(String name) { + String findLimitQuery = QUERY_FOR_LIMIT + " WHERE limit_record.str_name = ? " + "GROUP BY " + + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); + } + + @Override + public Limit getLimit(String id) { + String getLimitQuery = QUERY_FOR_LIMIT + " WHERE limit_record.pk_limit_record = ? " + + "GROUP BY " + "limit_record.str_name, " + "limit_record.pk_limit_record, " + + "limit_record.int_max_value"; + return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); + } + + @Override + public List getLimits() { + String getLimitsQuery = QUERY_FOR_LIMIT + " GROUP BY " + "limit_record.str_name, " + + "limit_record.pk_limit_record, " + "limit_record.int_max_value"; + return getJdbcTemplate().query(getLimitsQuery, LIMIT_MAPPER); + } + /* - * This should show anything that is making the frame dependent. + * Row Mappers */ - List depends = getJdbcTemplate().query(GET_DEPEND + " WHERE " - + "(pk_job_depend_er=? AND str_type IN ('JOB_ON_JOB','JOB_ON_LAYER','JOB_ON_FRAME')) OR " - + "(pk_layer_depend_er=? AND str_type IN ('LAYER_ON_JOB','LAYER_ON_LAYER','LAYER_ON_FRAME')) " - + "OR (pk_frame_depend_er=?)", DEPEND_MAPPER, frame.getJobId(), frame.getLayerId(), - frame.getFrameId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getDepends(JobInterface job) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_job_depend_er=? AND str_type != 'FRAME_ON_FRAME'", DEPEND_MAPPER, - job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public Depend getDepend(String id) { - return getJdbcTemplate().queryForObject(GET_DEPEND + " WHERE pk_depend=?", DEPEND_MAPPER, id); - } - - @Override - public Group findGroup(String show, String group) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND show.str_name=? 
AND folder.str_name=?", GROUP_MAPPER, show, group); - } - - @Override - public Host findHost(String name) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.str_name=?", HOST_MAPPER, name); - } - - @Override - public HostSeq getHosts(HostSearchInterface r) { - List hosts = - getJdbcTemplate().query(r.getFilteredQuery(GET_HOST), HOST_MAPPER, r.getValuesArray()); - return HostSeq.newBuilder().addAllHosts(hosts).build(); - } - - @Override - public Host getHost(String id) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); - } - - @Override - public ProcSeq getProcs(HostInterface host) { - ProcSearchInterface r = procSearchFactory.create(); - r.filterByHost(host); - r.sortByHostName(); - r.sortByDispatchedTime(); - return ProcSeq.newBuilder().addAllProcs(getProcs(r).getProcsList()).build(); - } - - @Override - public ProcSeq getProcs(ProcSearchInterface p) { - p.sortByHostName(); - p.sortByDispatchedTime(); - List procs = - getJdbcTemplate().query(p.getFilteredQuery(GET_PROC), PROC_MAPPER, p.getValuesArray()); - return ProcSeq.newBuilder().addAllProcs(procs).build(); - } - - @Override - public CommentSeq getComments(HostInterface h) { - List comments = - getJdbcTemplate().query(GET_HOST_COMMENTS, COMMENT_MAPPER, h.getHostId()); - return CommentSeq.newBuilder().addAllComments(comments).build(); - } - - @Override - public CommentSeq getComments(JobInterface j) { - List comments = - getJdbcTemplate().query(GET_JOB_COMMENTS, COMMENT_MAPPER, j.getJobId()); - return CommentSeq.newBuilder().addAllComments(comments).build(); - } - - @Override - public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, - int epochSeconds) { - - long timeDiff = (System.currentTimeMillis() / 1000) - epochSeconds; - if (timeDiff > 60) { - throw new IllegalArgumentException("the last update timestamp cannot be over " - + "a minute off the current time, difference was: " + timeDiff); - } - - UpdatedFrameCheckResult.Builder resultBuilder = UpdatedFrameCheckResult.newBuilder(); - resultBuilder.setState(JobState.valueOf(getJdbcTemplate() - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId()))); - - FrameSearchInterface r = frameSearchFactory.create(job); - r.filterByLayers(layers); - r.filterByChangeDate(epochSeconds); - r.setMaxResults(100); - - List updatedFrameList = getJdbcTemplate() - .query(r.getFilteredQuery(GET_UPDATED_FRAME), UPDATED_FRAME_MAPPER, r.getValuesArray()); - resultBuilder.setUpdatedFrames( - UpdatedFrameSeq.newBuilder().addAllUpdatedFrames(updatedFrameList).build()); - resultBuilder.setServerTime((int) (System.currentTimeMillis() / 1000) - 1); - - return resultBuilder.build(); - } - - @Override - public Department getDepartment(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject(GET_DEPARTMENT, DEPARTMENT_MAPPER, show.getShowId(), - name); - } - - @Override - public DepartmentSeq getDepartments(ShowInterface show) { - List departments = - getJdbcTemplate().query(GET_DEPARTMENTS, DEPARTMENT_MAPPER, show.getShowId()); - return DepartmentSeq.newBuilder().addAllDepartments(departments).build(); - } - - @Override - public List getDepartmentNames() { - return getJdbcTemplate().query("SELECT str_name FROM dept ORDER BY str_name ASC", - new RowMapper() { - public String mapRow(ResultSet rs, int row) throws SQLException { - return rs.getString("str_name"); - } - }); - } - - @Override - public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { - return 
getJdbcTemplate().queryForObject( - GET_TASK + " AND point.pk_show=? AND point.pk_dept=? AND task.str_shot=?", TASK_MAPPER, - show.getShowId(), dept.getDepartmentId(), shot); - } - - @Override - public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { - if (dept == null) { - return TaskSeq.newBuilder() - .addAllTasks( - getJdbcTemplate().query(GET_TASK + " AND point.pk_show=? ORDER BY task.str_shot", - TASK_MAPPER, show.getShowId())) - .build(); - } else { - return TaskSeq.newBuilder() - .addAllTasks(getJdbcTemplate().query( - GET_TASK + " AND point.pk_show=? AND point.pk_dept=? ORDER BY task.str_shot", - TASK_MAPPER, show.getShowId(), dept.getDepartmentId())) - .build(); - } - } - - @Override - public DeedSeq getDeeds(OwnerEntity owner) { - List deeds = getJdbcTemplate().query(QUERY_FOR_DEED + " AND owner.pk_owner=?", - DEED_MAPPER, owner.getId()); - return DeedSeq.newBuilder().addAllDeeds(deeds).build(); - } - - @Override - public DeedSeq getDeeds(ShowInterface show) { - List deeds = - getJdbcTemplate().query(QUERY_FOR_DEED + " AND show.pk_show=?", DEED_MAPPER, show.getId()); - return DeedSeq.newBuilder().addAllDeeds(deeds).build(); - } - - @Override - public Host getHost(DeedEntity deed) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", HOST_MAPPER, deed.id); - } - - @Override - public Deed getDeed(HostInterface host) { - return getJdbcTemplate().queryForObject(QUERY_FOR_DEED + " AND host.pk_host=?", DEED_MAPPER, - host.getHostId()); - } - - @Override - public HostSeq getHosts(OwnerEntity owner) { - StringBuilder sb = new StringBuilder(4096); - String query = GET_HOST; - query = query.replace("FROM ", "FROM owner, deed,"); - sb.append(query); - sb.append("AND deed.pk_host = host.pk_host "); - sb.append("AND deed.pk_owner = owner.pk_owner "); - sb.append("AND owner.pk_owner = ?"); - - List hosts = getJdbcTemplate().query(sb.toString(), HOST_MAPPER, owner.getId()); - return HostSeq.newBuilder().addAllHosts(hosts).build(); - } - - @Override - public Owner getOwner(DeedEntity deed) { - return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " - + "pk_owner = (SELECT deed.pk_owner FROM deed " + "WHERE pk_deed=?)", OWNER_MAPPER, - deed.getId()); - } - - @Override - public Owner getOwner(HostInterface host) { - return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " - + "pk_owner = (SELECT deed.pk_owner FROM deed " + "WHERE pk_host=?)", OWNER_MAPPER, - host.getHostId()); - } - - @Override - public List getOwners(ShowInterface show) { - return getJdbcTemplate().query(QUERY_FOR_OWNER + " AND owner.pk_show=?", OWNER_MAPPER, - show.getShowId()); - } - - @Override - public RenderPartition getRenderPartition(LocalHostAssignment l) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_RENDER_PART + "WHERE host_local.pk_host_local = ?", RENDER_PARTION_MAPPER, - l.getId()); - } - - @Override - public RenderPartitionSeq getRenderPartitions(HostInterface host) { - List partitions = - getJdbcTemplate().query(QUERY_FOR_RENDER_PART + "WHERE host_local.pk_host = ?", - RENDER_PARTION_MAPPER, host.getHostId()); - return RenderPartitionSeq.newBuilder().addAllRenderPartitions(partitions).build(); - } - - @Override - public Owner getOwner(String name) { - return getJdbcTemplate().queryForObject(QUERY_FOR_OWNER + " AND " + "(" - + "owner.str_username = ? " + "OR " + "owner.pk_owner = ?" 
+ ")", OWNER_MAPPER, name, name); - } - - @Override - public Facility getFacility(String name) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_FACILITY + " WHERE facility.pk_facility = ? OR facility.str_name = ?", - FACILITY_MAPPER, name, name); - } - - @Override - public FacilitySeq getFacilities() { - return FacilitySeq.newBuilder() - .addAllFacilities(getJdbcTemplate().query(QUERY_FOR_FACILITY, FACILITY_MAPPER)).build(); - } - - @Override - public Limit findLimit(String name) { - String findLimitQuery = QUERY_FOR_LIMIT + " WHERE limit_record.str_name = ? " + "GROUP BY " - + "limit_record.str_name, " + "limit_record.pk_limit_record, " - + "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); - } - - @Override - public Limit getLimit(String id) { - String getLimitQuery = QUERY_FOR_LIMIT + " WHERE limit_record.pk_limit_record = ? " - + "GROUP BY " + "limit_record.str_name, " + "limit_record.pk_limit_record, " - + "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); - } - - @Override - public List getLimits() { - String getLimitsQuery = QUERY_FOR_LIMIT + " GROUP BY " + "limit_record.str_name, " - + "limit_record.pk_limit_record, " + "limit_record.int_max_value"; - return getJdbcTemplate().query(getLimitsQuery, LIMIT_MAPPER); - } - - /* - * Row Mappers - */ - - public static final RowMapper LIMIT_MAPPER = new RowMapper() { - public Limit mapRow(ResultSet rs, int rowNum) throws SQLException { - return Limit.newBuilder().setId(SqlUtil.getString(rs, "pk_limit_record")) - .setName(SqlUtil.getString(rs, "str_name")).setMaxValue(rs.getInt("int_max_value")) - .setCurrentRunning(rs.getInt("int_current_running")).build(); - } - }; - - public static final RowMapper MATCHER_MAPPER = new RowMapper() { - public Matcher mapRow(ResultSet rs, int rowNum) throws SQLException { - return Matcher.newBuilder().setId(SqlUtil.getString(rs, "pk_matcher")) - .setInput(SqlUtil.getString(rs, "str_value")) - .setSubject(MatchSubject.valueOf(SqlUtil.getString(rs, "str_subject"))) - .setType(MatchType.valueOf(SqlUtil.getString(rs, "str_match"))).build(); - } - }; - - public static final RowMapper FILTER_MAPPER = new RowMapper() { - public Filter mapRow(ResultSet rs, int rowNum) throws SQLException { - return Filter.newBuilder().setId(SqlUtil.getString(rs, "pk_filter")) - .setType(FilterType.valueOf(SqlUtil.getString(rs, "str_type"))) - .setOrder(rs.getFloat("f_order")).setName(SqlUtil.getString(rs, "str_name")) - .setEnabled(rs.getBoolean("b_enabled")).build(); - } - }; - - public static final RowMapper ACTION_MAPPER = new RowMapper() { - public Action mapRow(ResultSet rs, int rowNum) throws SQLException { - Action.Builder builder = Action.newBuilder().setId(SqlUtil.getString(rs, "pk_action")) - .setBooleanValue(false).setIntegerValue(0).setFloatValue(0f).setStringValue("") - .setType(ActionType.valueOf(SqlUtil.getString(rs, "str_action"))) - .setValueType(ActionValueType.valueOf(SqlUtil.getString(rs, "str_value_type"))); - - switch (builder.getValueType()) { - case GROUP_TYPE: - builder.setGroupValue(SqlUtil.getString(rs, "pk_folder")); - break; - case STRING_TYPE: - builder.setStringValue(SqlUtil.getString(rs, "str_value")); - break; - case INTEGER_TYPE: - builder.setIntegerValue(rs.getInt("int_value")); - break; - case FLOAT_TYPE: - builder.setFloatValue(rs.getFloat("float_value")); - break; - case BOOLEAN_TYPE: - builder.setBooleanValue(rs.getBoolean("b_value")); - break; - } - return builder.build(); 
- } - }; - - public static final RowMapper FACILITY_MAPPER = new RowMapper() { - public Facility mapRow(ResultSet rs, int rowNum) throws SQLException { - return Facility.newBuilder().setName(rs.getString("str_name")) - .setId(rs.getString("pk_facility")).build(); - } - }; - - public static final RowMapper DEED_MAPPER = new RowMapper() { - public Deed mapRow(ResultSet rs, int rowNum) throws SQLException { - return Deed.newBuilder().setId(SqlUtil.getString(rs, "pk_deed")) - .setHost(SqlUtil.getString(rs, "str_host")) - .setOwner(SqlUtil.getString(rs, "str_username")).build(); - } - }; - - public static final RowMapper RENDER_PARTION_MAPPER = - new RowMapper() { - public RenderPartition mapRow(ResultSet rs, int rowNum) throws SQLException { - - RenderPartition.Builder builder = - RenderPartition.newBuilder().setId(SqlUtil.getString(rs, "pk_host_local")) - .setCores(rs.getInt("int_cores_max") - rs.getInt("int_cores_idle")) - .setMaxCores(rs.getInt("int_cores_max")).setThreads(rs.getInt("int_threads")) - .setMaxMemory(rs.getLong("int_mem_max")) - .setMemory(rs.getLong("int_mem_max") - rs.getLong("int_mem_idle")) - .setGpus(rs.getInt("int_gpus_max") - rs.getInt("int_gpus_idle")) - .setMaxGpus(rs.getInt("int_gpus_max")) - .setGpuMemory(rs.getLong("int_gpu_mem_max") - rs.getLong("int_gpu_mem_idle")) - .setMaxGpuMemory(rs.getLong("int_gpu_mem_max")) - .setHost(SqlUtil.getString(rs, "str_host_name")) - .setJob(SqlUtil.getString(rs, "str_job_name")) - .setRenderPartType(RenderPartitionType.valueOf(SqlUtil.getString(rs, "str_type"))) - .setLayer("").setFrame(""); - - if (SqlUtil.getString(rs, "str_layer_name") != null) { - builder.setLayer(SqlUtil.getString(rs, "str_layer_name")); - } - - if (SqlUtil.getString(rs, "str_frame_name") != null) { - builder.setFrame(SqlUtil.getString(rs, "str_frame_name")); - } - - return builder.build(); + public static final RowMapper LIMIT_MAPPER = new RowMapper() { + public Limit mapRow(ResultSet rs, int rowNum) throws SQLException { + return Limit.newBuilder().setId(SqlUtil.getString(rs, "pk_limit_record")) + .setName(SqlUtil.getString(rs, "str_name")) + .setMaxValue(rs.getInt("int_max_value")) + .setCurrentRunning(rs.getInt("int_current_running")).build(); + } + }; + + public static final RowMapper MATCHER_MAPPER = new RowMapper() { + public Matcher mapRow(ResultSet rs, int rowNum) throws SQLException { + return Matcher.newBuilder().setId(SqlUtil.getString(rs, "pk_matcher")) + .setInput(SqlUtil.getString(rs, "str_value")) + .setSubject(MatchSubject.valueOf(SqlUtil.getString(rs, "str_subject"))) + .setType(MatchType.valueOf(SqlUtil.getString(rs, "str_match"))).build(); + } + }; + + public static final RowMapper FILTER_MAPPER = new RowMapper() { + public Filter mapRow(ResultSet rs, int rowNum) throws SQLException { + return Filter.newBuilder().setId(SqlUtil.getString(rs, "pk_filter")) + .setType(FilterType.valueOf(SqlUtil.getString(rs, "str_type"))) + .setOrder(rs.getFloat("f_order")).setName(SqlUtil.getString(rs, "str_name")) + .setEnabled(rs.getBoolean("b_enabled")).build(); + } + }; + + public static final RowMapper ACTION_MAPPER = new RowMapper() { + public Action mapRow(ResultSet rs, int rowNum) throws SQLException { + Action.Builder builder = Action.newBuilder().setId(SqlUtil.getString(rs, "pk_action")) + .setBooleanValue(false).setIntegerValue(0).setFloatValue(0f).setStringValue("") + .setType(ActionType.valueOf(SqlUtil.getString(rs, "str_action"))) + .setValueType(ActionValueType.valueOf(SqlUtil.getString(rs, "str_value_type"))); + + switch 
(builder.getValueType()) { + case GROUP_TYPE: + builder.setGroupValue(SqlUtil.getString(rs, "pk_folder")); + break; + case STRING_TYPE: + builder.setStringValue(SqlUtil.getString(rs, "str_value")); + break; + case INTEGER_TYPE: + builder.setIntegerValue(rs.getInt("int_value")); + break; + case FLOAT_TYPE: + builder.setFloatValue(rs.getFloat("float_value")); + break; + case BOOLEAN_TYPE: + builder.setBooleanValue(rs.getBoolean("b_value")); + break; + } + return builder.build(); + } + }; + + public static final RowMapper FACILITY_MAPPER = new RowMapper() { + public Facility mapRow(ResultSet rs, int rowNum) throws SQLException { + return Facility.newBuilder().setName(rs.getString("str_name")) + .setId(rs.getString("pk_facility")).build(); + } + }; + + public static final RowMapper DEED_MAPPER = new RowMapper() { + public Deed mapRow(ResultSet rs, int rowNum) throws SQLException { + return Deed.newBuilder().setId(SqlUtil.getString(rs, "pk_deed")) + .setHost(SqlUtil.getString(rs, "str_host")) + .setOwner(SqlUtil.getString(rs, "str_username")).build(); + } + }; + + public static final RowMapper RENDER_PARTION_MAPPER = + new RowMapper() { + public RenderPartition mapRow(ResultSet rs, int rowNum) throws SQLException { + + RenderPartition.Builder builder = RenderPartition.newBuilder() + .setId(SqlUtil.getString(rs, "pk_host_local")) + .setCores(rs.getInt("int_cores_max") - rs.getInt("int_cores_idle")) + .setMaxCores(rs.getInt("int_cores_max")) + .setThreads(rs.getInt("int_threads")) + .setMaxMemory(rs.getLong("int_mem_max")) + .setMemory(rs.getLong("int_mem_max") - rs.getLong("int_mem_idle")) + .setGpus(rs.getInt("int_gpus_max") - rs.getInt("int_gpus_idle")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setGpuMemory( + rs.getLong("int_gpu_mem_max") - rs.getLong("int_gpu_mem_idle")) + .setMaxGpuMemory(rs.getLong("int_gpu_mem_max")) + .setHost(SqlUtil.getString(rs, "str_host_name")) + .setJob(SqlUtil.getString(rs, "str_job_name")) + .setRenderPartType( + RenderPartitionType.valueOf(SqlUtil.getString(rs, "str_type"))) + .setLayer("").setFrame(""); + + if (SqlUtil.getString(rs, "str_layer_name") != null) { + builder.setLayer(SqlUtil.getString(rs, "str_layer_name")); + } + + if (SqlUtil.getString(rs, "str_frame_name") != null) { + builder.setFrame(SqlUtil.getString(rs, "str_frame_name")); + } + + return builder.build(); + + } + }; + + public static final RowMapper OWNER_MAPPER = new RowMapper() { + public Owner mapRow(ResultSet rs, int rowNum) throws SQLException { + return Owner.newBuilder().setName(SqlUtil.getString(rs, "str_username")) + .setId(SqlUtil.getString(rs, "pk_owner")) + .setShow(SqlUtil.getString(rs, "str_show")) + .setHostCount(rs.getInt("host_count")).build(); + } + }; + + public static final RowMapper DEPARTMENT_MAPPER = new RowMapper() { + public Department mapRow(ResultSet rs, int row) throws SQLException { + return Department.newBuilder().setId(SqlUtil.getString(rs, "pk_point")) + .setName(SqlUtil.getString(rs, "str_name")) + .setDept(SqlUtil.getString(rs, "str_dept")) + .setTiManaged(rs.getBoolean("b_managed")) + .setTiTask(SqlUtil.getString(rs, "str_ti_task")) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))).build(); + } + }; + + public static final RowMapper PROC_MAPPER = new RowMapper() { + public Proc mapRow(ResultSet rs, int row) throws SQLException { + return Proc.newBuilder().setId(SqlUtil.getString(rs, "pk_proc")) + .setName(CueUtil.buildProcName(SqlUtil.getString(rs, "host_name"), + rs.getInt("int_cores_reserved"), rs.getInt("int_gpus_reserved"))) + 
.setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores_reserved"))) + .setReservedMemory(rs.getLong("int_mem_reserved")) + .setReservedGpus(rs.getInt("int_gpus_reserved")) + .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) + .setUsedMemory(rs.getLong("int_mem_used")) + .setUsedGpuMemory(rs.getLong("int_gpu_mem_used")) + .setFrameName(SqlUtil.getString(rs, "frame_name")) + .setJobName(SqlUtil.getString(rs, "job_name")) + .setGroupName(SqlUtil.getString(rs, "folder_name")) + .setShowName(SqlUtil.getString(rs, "show_name")) + .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) + .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) + .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) + .setUnbooked(rs.getBoolean("b_unbooked")) + .setLogPath(String.format("%s/%s.%s.rqlog", + SqlUtil.getString(rs, "str_log_dir"), SqlUtil.getString(rs, "job_name"), + SqlUtil.getString(rs, "frame_name"))) + .setRedirectTarget(SqlUtil.getString(rs, "str_redirect")) + .setChildProcesses(SqlUtil.getByteString(rs, "bytea_children")) + .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))) + .build(); + } + }; + + public static final RowMapper TASK_MAPPER = new RowMapper() { + public Task mapRow(ResultSet rs, int row) throws SQLException { + return Task.newBuilder().setId(SqlUtil.getString(rs, "pk_task")) + .setDept(SqlUtil.getString(rs, "str_dept")) + .setShot(SqlUtil.getString(rs, "str_shot")) + .setMinCores(Convert.coreUnitsToWholeCores(rs.getInt("int_min_cores"))) + .setAdjustCores(Convert.coreUnitsToWholeCores(rs.getInt("int_adjust_cores"))) + .build(); + } + }; + + public static final RowMapper COMMENT_MAPPER = new RowMapper() { + + public Comment mapRow(ResultSet rs, int row) throws SQLException { + return Comment.newBuilder().setId(SqlUtil.getString(rs, "pk_comment")) + .setMessage(SqlUtil.getString(rs, "str_message")) + .setSubject(SqlUtil.getString(rs, "str_subject")) + .setTimestamp((int) (rs.getTimestamp("ts_created").getTime() / 1000)) + .setUser(SqlUtil.getString(rs, "str_user")).build(); + } + }; + + public static NestedHost.Builder mapNestedHostBuilder(ResultSet rs) throws SQLException { + NestedHost.Builder builder = NestedHost.newBuilder().setId(SqlUtil.getString(rs, "pk_host")) + .setName(SqlUtil.getString(rs, "host_name")) + .setAllocName(SqlUtil.getString(rs, "alloc_name")) + .setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)) + .setFreeMcp(rs.getLong("int_mcp_free")).setFreeMemory(rs.getLong("int_mem_free")) + .setFreeSwap(rs.getLong("int_swap_free")) + .setFreeGpuMemory(rs.getLong("int_gpu_mem_free")).setLoad(rs.getInt("int_load")) + .setNimbyEnabled(rs.getBoolean("b_nimby")) + .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))) + .setMemory(rs.getLong("int_mem")).setIdleMemory(rs.getLong("int_mem_idle")) + .setGpus(rs.getInt("int_gpus")).setIdleGpus(rs.getInt("int_gpus_idle")) + .setGpuMemory(rs.getLong("int_gpu_mem")) + .setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")) + .setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))) + .setTotalMcp(rs.getLong("int_mcp_total")) + .setTotalMemory(rs.getLong("int_mem_total")) + .setTotalSwap(rs.getLong("int_swap_total")) + .setTotalGpuMemory(rs.getLong("int_gpu_mem_total")) + .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) + .setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))) + 
.setHasComment(rs.getBoolean("b_comment")) + .setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]) + .setOs(SqlUtil.getString(rs, "str_os")); + + String tags = SqlUtil.getString(rs, "str_tags"); + if (tags != null) + builder.addAllTags(Arrays.asList(tags.split(" "))); + return builder; + } + + public static Host.Builder mapHostBuilder(ResultSet rs) throws SQLException { + Host.Builder builder = Host.newBuilder(); + builder.setId(SqlUtil.getString(rs, "pk_host")); + builder.setName(SqlUtil.getString(rs, "host_name")); + builder.setAllocName(SqlUtil.getString(rs, "alloc_name")); + builder.setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)); + builder.setFreeMcp(rs.getLong("int_mcp_free")); + builder.setFreeMemory(rs.getLong("int_mem_free")); + builder.setFreeSwap(rs.getLong("int_swap_free")); + builder.setFreeGpuMemory(rs.getLong("int_gpu_mem_free")); + builder.setLoad(rs.getInt("int_load")); + builder.setNimbyEnabled(rs.getBoolean("b_nimby")); + builder.setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))); + builder.setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))); + builder.setMemory(rs.getLong("int_mem")); + builder.setIdleMemory(rs.getLong("int_mem_idle")); + builder.setGpus(rs.getInt("int_gpus")); + builder.setIdleGpus(rs.getInt("int_gpus_idle")); + builder.setGpuMemory(rs.getLong("int_gpu_mem")); + builder.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); + builder.setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))); + builder.setTotalMcp(rs.getLong("int_mcp_total")); + builder.setTotalMemory(rs.getLong("int_mem_total")); + builder.setTotalSwap(rs.getLong("int_swap_total")); + builder.setTotalGpuMemory(rs.getLong("int_gpu_mem_total")); + builder.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)); + builder.setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))); + builder.setHasComment(rs.getBoolean("b_comment")); + builder.setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]); + builder.setOs(SqlUtil.getString(rs, "str_os")); + + String tags = SqlUtil.getString(rs, "str_tags"); + if (tags != null) + builder.addAllTags(Arrays.asList(tags.split(" "))); + return builder; + } + + public static final RowMapper<Host> HOST_MAPPER = new RowMapper<Host>() { + public Host mapRow(ResultSet rs, int row) throws SQLException { + Host.Builder builder = mapHostBuilder(rs); + return builder.build(); + } + }; + + public static final RowMapper<Depend> DEPEND_MAPPER = new RowMapper<Depend>() { + public Depend mapRow(ResultSet rs, int rowNum) throws SQLException { + return Depend.newBuilder().setId(SqlUtil.getString(rs, "pk_depend")) + .setActive(rs.getBoolean("b_active")).setAnyFrame(rs.getBoolean("b_any")) + .setDependErFrame(SqlUtil.getString(rs, "depend_er_frame")) + .setDependErLayer(SqlUtil.getString(rs, "depend_er_layer")) + .setDependErJob(SqlUtil.getString(rs, "depend_er_job")) + .setDependOnFrame(SqlUtil.getString(rs, "depend_on_frame")) + .setDependOnLayer(SqlUtil.getString(rs, "depend_on_layer")) + .setDependOnJob(SqlUtil.getString(rs, "depend_on_job")) + .setType(DependType.valueOf(SqlUtil.getString(rs, "str_type"))) + .setTarget(DependTarget.valueOf(SqlUtil.getString(rs, "str_target"))).build(); + } + }; + + public static final RowMapper<Allocation> ALLOCATION_MAPPER = new RowMapper<Allocation>() { + public Allocation mapRow(ResultSet rs, int rowNum) throws SQLException { + return Allocation.newBuilder().setId(rs.getString("pk_alloc")) + .setName(rs.getString("str_name")).setFacility(rs.getString("facility_name")) + 
.setTag(rs.getString("str_tag")).setBillable(rs.getBoolean("b_billable")) + .setStats(AllocationStats.newBuilder() + .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setAvailableCores( + Convert.coreUnitsToCores(rs.getInt("int_available_cores"))) + .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_idle_cores"))) + .setRunningCores( + Convert.coreUnitsToCores(rs.getInt("int_running_cores"))) + .setLockedCores(Convert.coreUnitsToCores(rs.getInt("int_locked_cores"))) + .setGpus(rs.getInt("int_gpus")) + .setAvailableGpus(rs.getInt("int_available_gpus")) + .setIdleGpus(rs.getInt("int_idle_gpus")) + .setRunningGpus(rs.getInt("int_running_gpus")) + .setLockedGpus(rs.getInt("int_locked_gpus")) + .setHosts(rs.getInt("int_hosts")) + .setDownHosts(rs.getInt("int_down_hosts")) + .setLockedHosts(rs.getInt("int_locked_hosts")).build()) + .build(); + } + }; + + private static final RowMapper<Group> GROUP_MAPPER = new RowMapper<Group>() { + + public Group mapRow(ResultSet rs, int rowNum) throws SQLException { + GroupStats stats = GroupStats.newBuilder().setDeadFrames(rs.getInt("int_dead_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setWaitingFrames(rs.getInt("int_waiting_count")) + .setDependFrames(rs.getInt("int_depend_count")) + .setPendingJobs(rs.getInt("int_job_count")) + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).build(); + return Group.newBuilder().setId(SqlUtil.getString(rs, "pk_folder")) + .setName(SqlUtil.getString(rs, "group_name")) + .setDepartment(SqlUtil.getString(rs, "str_dept")) + .setDefaultJobPriority(rs.getInt("int_job_priority")) + .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_job_min_cores"))) + .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_job_max_cores"))) + .setDefaultJobMinGpus(rs.getInt("int_job_min_gpus")) + .setDefaultJobMaxGpus(rs.getInt("int_job_max_gpus")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_gpus")).setMinGpus(rs.getInt("int_min_gpus")) + .setLevel(rs.getInt("int_level")) + .setParentId(SqlUtil.getString(rs, "pk_parent_folder")).setGroupStats(stats) + .build(); + } + }; + + public static final RowMapper<Job> JOB_MAPPER = new RowMapper<Job>() { + public Job mapRow(ResultSet rs, int rowNum) throws SQLException { + Job.Builder jobBuilder = Job.newBuilder().setId(SqlUtil.getString(rs, "pk_job")) + .setLogDir(SqlUtil.getString(rs, "str_log_dir")) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_gpus")).setMinGpus(rs.getInt("int_min_gpus")) + .setName(SqlUtil.getString(rs, "str_name")) + .setPriority(rs.getInt("int_priority")) + .setShot(SqlUtil.getString(rs, "str_shot")) + .setShow(SqlUtil.getString(rs, "str_show")) + .setFacility(SqlUtil.getString(rs, "facility_name")) + .setGroup(SqlUtil.getString(rs, "group_name")) + .setState(JobState.valueOf(SqlUtil.getString(rs, "str_state"))) + .setUser(SqlUtil.getString(rs, "str_user")) + .setIsPaused(rs.getBoolean("b_paused")) + .setHasComment(rs.getBoolean("b_comment")) + .setAutoEat(rs.getBoolean("b_autoeat")) + .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) + .setOs(SqlUtil.getString(rs, "str_os")); + + int uid = rs.getInt("int_uid"); + if (!rs.wasNull()) { + jobBuilder.setUid(uid); + } + + Timestamp ts = rs.getTimestamp("ts_stopped"); + if (ts != 
null) { + jobBuilder.setStopTime((int) (ts.getTime() / 1000)); + } else { + jobBuilder.setStopTime(0); + } + + jobBuilder.setJobStats(mapJobStats(rs)); + return jobBuilder.build(); + } + }; + + public static JobStats mapJobStats(ResultSet rs) throws SQLException { + + JobStats.Builder statsBuilder = JobStats.newBuilder() + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).setMaxRss(rs.getLong("int_max_rss")) + .setTotalFrames(rs.getInt("int_frame_count")) + .setTotalLayers(rs.getInt("int_layer_count")) + .setWaitingFrames(rs.getInt("int_waiting_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setDeadFrames(rs.getInt("int_dead_count")) + .setSucceededFrames(rs.getInt("int_succeeded_count")) + .setEatenFrames(rs.getInt("int_eaten_count")) + .setDependFrames(rs.getInt("int_depend_count")) + .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) + .setFailedCoreSec(rs.getLong("int_core_time_fail")) + .setRenderedCoreSec(rs.getLong("int_core_time_success")) + .setTotalCoreSec( + rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) + .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) + .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) + .setTotalGpuSec( + rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) + .setRenderedFrameCount(rs.getLong("int_frame_success_count")) + .setFailedFrameCount(rs.getLong("int_frame_fail_count")) + .setHighFrameSec(rs.getInt("int_clock_time_high")); + + if (statsBuilder.getRenderedFrameCount() > 0) { + statsBuilder.setAvgFrameSec((int) (rs.getLong("int_clock_time_success") + / statsBuilder.getRenderedFrameCount())); + statsBuilder.setAvgCoreSec((int) (statsBuilder.getRenderedCoreSec() + / statsBuilder.getRenderedFrameCount())); + statsBuilder.setRemainingCoreSec( + (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); + } else { + statsBuilder.setAvgFrameSec(0); + statsBuilder.setAvgCoreSec(0); + statsBuilder.setRemainingCoreSec(0); + } + return statsBuilder.build(); + } + + public static final RowMapper<Layer> LAYER_MAPPER = new RowMapper<Layer>() { + public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { + Layer.Builder builder = Layer.newBuilder().setId(SqlUtil.getString(rs, "pk_layer")) + .setParentId(SqlUtil.getString(rs, "pk_job")) + .setChunkSize(rs.getInt("int_chunk_size")) + .setDispatchOrder(rs.getInt("int_dispatch_order")) + .setName(SqlUtil.getString(rs, "str_name")) + .setCommand(SqlUtil.getString(rs, "str_cmd")) + .setRange(SqlUtil.getString(rs, "str_range")) + .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_cores_min"))) + .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_cores_max"))) + .setIsThreadable(rs.getBoolean("b_threadable")) + .setMinMemory(rs.getLong("int_mem_min")).setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getLong("int_gpu_mem_min")) + .setType(LayerType.valueOf(SqlUtil.getString(rs, "str_type"))) + .addAllTags(Sets.newHashSet( + SqlUtil.getString(rs, "str_tags").replaceAll(" ", "").split("\\|"))) + .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))) + .addAllLimits( + Arrays.asList(SqlUtil.getString(rs, "str_limit_names").split(","))) + .setMemoryOptimizerEnabled(rs.getBoolean("b_optimize")) + .setTimeout(rs.getInt("int_timeout")) + .setTimeoutLlu(rs.getInt("int_timeout_llu")); + + LayerStats.Builder statsBuilder = LayerStats.newBuilder() + 
.setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")).setMaxRss(rs.getLong("int_max_rss")) + .setTotalFrames(rs.getInt("int_total_count")) + .setWaitingFrames(rs.getInt("int_waiting_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setDeadFrames(rs.getInt("int_dead_count")) + .setSucceededFrames(rs.getInt("int_succeeded_count")) + .setEatenFrames(rs.getInt("int_eaten_count")) + .setDependFrames(rs.getInt("int_depend_count")) + .setPendingFrames( + rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) + .setFailedCoreSec(rs.getLong("int_core_time_fail")) + .setRenderedCoreSec(rs.getLong("int_core_time_success")) + .setTotalCoreSec( + rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) + .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) + .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) + .setTotalGpuSec( + rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) + .setRenderedFrameCount(rs.getLong("int_frame_success_count")) + .setFailedFrameCount(rs.getLong("int_frame_fail_count")) + .setHighFrameSec(rs.getInt("int_clock_time_high")) + .setLowFrameSec(rs.getInt("int_clock_time_low")); + + if (statsBuilder.getRenderedFrameCount() > 0) { + statsBuilder.setAvgFrameSec((int) (rs.getLong("int_clock_time_success") + / statsBuilder.getRenderedFrameCount())); + statsBuilder.setAvgCoreSec((int) (statsBuilder.getRenderedCoreSec() + / statsBuilder.getRenderedFrameCount())); + statsBuilder.setRemainingCoreSec( + (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); + } else { + statsBuilder.setAvgFrameSec(0); + statsBuilder.setAvgCoreSec(0); + statsBuilder.setRemainingCoreSec(0); + } + builder.setLayerStats(statsBuilder.build()); + return builder.build(); } - }; - - public static final RowMapper OWNER_MAPPER = new RowMapper() { - public Owner mapRow(ResultSet rs, int rowNum) throws SQLException { - return Owner.newBuilder().setName(SqlUtil.getString(rs, "str_username")) - .setId(SqlUtil.getString(rs, "pk_owner")).setShow(SqlUtil.getString(rs, "str_show")) - .setHostCount(rs.getInt("host_count")).build(); - } - }; - - public static final RowMapper DEPARTMENT_MAPPER = new RowMapper() { - public Department mapRow(ResultSet rs, int row) throws SQLException { - return Department.newBuilder().setId(SqlUtil.getString(rs, "pk_point")) - .setName(SqlUtil.getString(rs, "str_name")).setDept(SqlUtil.getString(rs, "str_dept")) - .setTiManaged(rs.getBoolean("b_managed")).setTiTask(SqlUtil.getString(rs, "str_ti_task")) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))).build(); - } - }; - - public static final RowMapper PROC_MAPPER = new RowMapper() { - public Proc mapRow(ResultSet rs, int row) throws SQLException { - return Proc.newBuilder().setId(SqlUtil.getString(rs, "pk_proc")) - .setName(CueUtil.buildProcName(SqlUtil.getString(rs, "host_name"), - rs.getInt("int_cores_reserved"), rs.getInt("int_gpus_reserved"))) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores_reserved"))) - .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpus(rs.getInt("int_gpus_reserved")) - .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) - .setUsedMemory(rs.getLong("int_mem_used")) - .setUsedGpuMemory(rs.getLong("int_gpu_mem_used")) - .setFrameName(SqlUtil.getString(rs, "frame_name")) - .setJobName(SqlUtil.getString(rs, "job_name")) - .setGroupName(SqlUtil.getString(rs, "folder_name")) - .setShowName(SqlUtil.getString(rs, "show_name")) - 
.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) - .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) - .setUnbooked(rs.getBoolean("b_unbooked")) - .setLogPath(String.format("%s/%s.%s.rqlog", SqlUtil.getString(rs, "str_log_dir"), - SqlUtil.getString(rs, "job_name"), SqlUtil.getString(rs, "frame_name"))) - .setRedirectTarget(SqlUtil.getString(rs, "str_redirect")) - .setChildProcesses(SqlUtil.getByteString(rs, "bytea_children")) - .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))).build(); - } - }; - - public static final RowMapper TASK_MAPPER = new RowMapper() { - public Task mapRow(ResultSet rs, int row) throws SQLException { - return Task.newBuilder().setId(SqlUtil.getString(rs, "pk_task")) - .setDept(SqlUtil.getString(rs, "str_dept")).setShot(SqlUtil.getString(rs, "str_shot")) - .setMinCores(Convert.coreUnitsToWholeCores(rs.getInt("int_min_cores"))) - .setAdjustCores(Convert.coreUnitsToWholeCores(rs.getInt("int_adjust_cores"))).build(); - } - }; - - public static final RowMapper COMMENT_MAPPER = new RowMapper() { - - public Comment mapRow(ResultSet rs, int row) throws SQLException { - return Comment.newBuilder().setId(SqlUtil.getString(rs, "pk_comment")) - .setMessage(SqlUtil.getString(rs, "str_message")) - .setSubject(SqlUtil.getString(rs, "str_subject")) - .setTimestamp((int) (rs.getTimestamp("ts_created").getTime() / 1000)) - .setUser(SqlUtil.getString(rs, "str_user")).build(); - } - }; - - public static NestedHost.Builder mapNestedHostBuilder(ResultSet rs) throws SQLException { - NestedHost.Builder builder = NestedHost.newBuilder().setId(SqlUtil.getString(rs, "pk_host")) - .setName(SqlUtil.getString(rs, "host_name")) - .setAllocName(SqlUtil.getString(rs, "alloc_name")) - .setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)) - .setFreeMcp(rs.getLong("int_mcp_free")).setFreeMemory(rs.getLong("int_mem_free")) - .setFreeSwap(rs.getLong("int_swap_free")).setFreeGpuMemory(rs.getLong("int_gpu_mem_free")) - .setLoad(rs.getInt("int_load")).setNimbyEnabled(rs.getBoolean("b_nimby")) - .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))) - .setMemory(rs.getLong("int_mem")).setIdleMemory(rs.getLong("int_mem_idle")) - .setGpus(rs.getInt("int_gpus")).setIdleGpus(rs.getInt("int_gpus_idle")) - .setGpuMemory(rs.getLong("int_gpu_mem")).setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")) - .setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))) - .setTotalMcp(rs.getLong("int_mcp_total")).setTotalMemory(rs.getLong("int_mem_total")) - .setTotalSwap(rs.getLong("int_swap_total")) - .setTotalGpuMemory(rs.getLong("int_gpu_mem_total")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))) - .setHasComment(rs.getBoolean("b_comment")) - .setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]) - .setOs(SqlUtil.getString(rs, "str_os")); - - String tags = SqlUtil.getString(rs, "str_tags"); - if (tags != null) - builder.addAllTags(Arrays.asList(tags.split(" "))); - return builder; - } - - public static Host.Builder mapHostBuilder(ResultSet rs) throws SQLException { - Host.Builder builder = Host.newBuilder(); - builder.setId(SqlUtil.getString(rs, "pk_host")); - builder.setName(SqlUtil.getString(rs, "host_name")); - builder.setAllocName(SqlUtil.getString(rs, "alloc_name")); - 
builder.setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)); - builder.setFreeMcp(rs.getLong("int_mcp_free")); - builder.setFreeMemory(rs.getLong("int_mem_free")); - builder.setFreeSwap(rs.getLong("int_swap_free")); - builder.setFreeGpuMemory(rs.getLong("int_gpu_mem_free")); - builder.setLoad(rs.getInt("int_load")); - builder.setNimbyEnabled(rs.getBoolean("b_nimby")); - builder.setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))); - builder.setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))); - builder.setMemory(rs.getLong("int_mem")); - builder.setIdleMemory(rs.getLong("int_mem_idle")); - builder.setGpus(rs.getInt("int_gpus")); - builder.setIdleGpus(rs.getInt("int_gpus_idle")); - builder.setGpuMemory(rs.getLong("int_gpu_mem")); - builder.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); - builder.setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))); - builder.setTotalMcp(rs.getLong("int_mcp_total")); - builder.setTotalMemory(rs.getLong("int_mem_total")); - builder.setTotalSwap(rs.getLong("int_swap_total")); - builder.setTotalGpuMemory(rs.getLong("int_gpu_mem_total")); - builder.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)); - builder.setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))); - builder.setHasComment(rs.getBoolean("b_comment")); - builder.setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]); - builder.setOs(SqlUtil.getString(rs, "str_os")); - - String tags = SqlUtil.getString(rs, "str_tags"); - if (tags != null) - builder.addAllTags(Arrays.asList(tags.split(" "))); - return builder; - } - - public static final RowMapper HOST_MAPPER = new RowMapper() { - public Host mapRow(ResultSet rs, int row) throws SQLException { - Host.Builder builder = mapHostBuilder(rs); - return builder.build(); - } - }; - - public static final RowMapper DEPEND_MAPPER = new RowMapper() { - public Depend mapRow(ResultSet rs, int rowNum) throws SQLException { - return Depend.newBuilder().setId(SqlUtil.getString(rs, "pk_depend")) - .setActive(rs.getBoolean("b_active")).setAnyFrame(rs.getBoolean("b_any")) - .setDependErFrame(SqlUtil.getString(rs, "depend_er_frame")) - .setDependErLayer(SqlUtil.getString(rs, "depend_er_layer")) - .setDependErJob(SqlUtil.getString(rs, "depend_er_job")) - .setDependOnFrame(SqlUtil.getString(rs, "depend_on_frame")) - .setDependOnLayer(SqlUtil.getString(rs, "depend_on_layer")) - .setDependOnJob(SqlUtil.getString(rs, "depend_on_job")) - .setType(DependType.valueOf(SqlUtil.getString(rs, "str_type"))) - .setTarget(DependTarget.valueOf(SqlUtil.getString(rs, "str_target"))).build(); - } - }; - - public static final RowMapper ALLOCATION_MAPPER = new RowMapper() { - public Allocation mapRow(ResultSet rs, int rowNum) throws SQLException { - return Allocation.newBuilder().setId(rs.getString("pk_alloc")) - .setName(rs.getString("str_name")).setFacility(rs.getString("facility_name")) - .setTag(rs.getString("str_tag")).setBillable(rs.getBoolean("b_billable")) - .setStats(AllocationStats.newBuilder() - .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setAvailableCores(Convert.coreUnitsToCores(rs.getInt("int_available_cores"))) - .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_idle_cores"))) - .setRunningCores(Convert.coreUnitsToCores(rs.getInt("int_running_cores"))) - .setLockedCores(Convert.coreUnitsToCores(rs.getInt("int_locked_cores"))) - .setGpus(rs.getInt("int_gpus")).setAvailableGpus(rs.getInt("int_available_gpus")) - 
.setIdleGpus(rs.getInt("int_idle_gpus")).setRunningGpus(rs.getInt("int_running_gpus")) - .setLockedGpus(rs.getInt("int_locked_gpus")).setHosts(rs.getInt("int_hosts")) - .setDownHosts(rs.getInt("int_down_hosts")) - .setLockedHosts(rs.getInt("int_locked_hosts")).build()) - .build(); - } - }; - - private static final RowMapper GROUP_MAPPER = new RowMapper() { - - public Group mapRow(ResultSet rs, int rowNum) throws SQLException { - GroupStats stats = GroupStats.newBuilder().setDeadFrames(rs.getInt("int_dead_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setDependFrames(rs.getInt("int_depend_count")).setPendingJobs(rs.getInt("int_job_count")) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")).build(); - return Group.newBuilder().setId(SqlUtil.getString(rs, "pk_folder")) - .setName(SqlUtil.getString(rs, "group_name")) - .setDepartment(SqlUtil.getString(rs, "str_dept")) - .setDefaultJobPriority(rs.getInt("int_job_priority")) - .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_job_min_cores"))) - .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_job_max_cores"))) - .setDefaultJobMinGpus(rs.getInt("int_job_min_gpus")) - .setDefaultJobMaxGpus(rs.getInt("int_job_max_gpus")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setMaxGpus(rs.getInt("int_max_gpus")).setMinGpus(rs.getInt("int_min_gpus")) - .setLevel(rs.getInt("int_level")).setParentId(SqlUtil.getString(rs, "pk_parent_folder")) - .setGroupStats(stats).build(); - } - }; - - public static final RowMapper JOB_MAPPER = new RowMapper() { - public Job mapRow(ResultSet rs, int rowNum) throws SQLException { - Job.Builder jobBuilder = Job.newBuilder().setId(SqlUtil.getString(rs, "pk_job")) - .setLogDir(SqlUtil.getString(rs, "str_log_dir")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setMaxGpus(rs.getInt("int_max_gpus")).setMinGpus(rs.getInt("int_min_gpus")) - .setName(SqlUtil.getString(rs, "str_name")).setPriority(rs.getInt("int_priority")) - .setShot(SqlUtil.getString(rs, "str_shot")).setShow(SqlUtil.getString(rs, "str_show")) - .setFacility(SqlUtil.getString(rs, "facility_name")) - .setGroup(SqlUtil.getString(rs, "group_name")) - .setState(JobState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setUser(SqlUtil.getString(rs, "str_user")).setIsPaused(rs.getBoolean("b_paused")) - .setHasComment(rs.getBoolean("b_comment")).setAutoEat(rs.getBoolean("b_autoeat")) - .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) - .setOs(SqlUtil.getString(rs, "str_os")); - - int uid = rs.getInt("int_uid"); - if (!rs.wasNull()) { - jobBuilder.setUid(uid); - } - - Timestamp ts = rs.getTimestamp("ts_stopped"); - if (ts != null) { - jobBuilder.setStopTime((int) (ts.getTime() / 1000)); - } else { - jobBuilder.setStopTime(0); - } - - jobBuilder.setJobStats(mapJobStats(rs)); - return jobBuilder.build(); - } - }; - - public static JobStats mapJobStats(ResultSet rs) throws SQLException { - - JobStats.Builder statsBuilder = JobStats.newBuilder() - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")).setMaxRss(rs.getLong("int_max_rss")) - .setTotalFrames(rs.getInt("int_frame_count")).setTotalLayers(rs.getInt("int_layer_count")) - 
.setWaitingFrames(rs.getInt("int_waiting_count")) - .setRunningFrames(rs.getInt("int_running_count")).setDeadFrames(rs.getInt("int_dead_count")) - .setSucceededFrames(rs.getInt("int_succeeded_count")) - .setEatenFrames(rs.getInt("int_eaten_count")).setDependFrames(rs.getInt("int_depend_count")) - .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) - .setFailedCoreSec(rs.getLong("int_core_time_fail")) - .setRenderedCoreSec(rs.getLong("int_core_time_success")) - .setTotalCoreSec(rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) - .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) - .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) - .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) - .setRenderedFrameCount(rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setHighFrameSec(rs.getInt("int_clock_time_high")); - - if (statsBuilder.getRenderedFrameCount() > 0) { - statsBuilder.setAvgCoreSec( - (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); - statsBuilder.setAvgCoreSec( - (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); - statsBuilder.setRemainingCoreSec( - (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); - } else { - statsBuilder.setAvgFrameSec(0); - statsBuilder.setAvgCoreSec(0); - statsBuilder.setRemainingCoreSec(0); - } - return statsBuilder.build(); - } - - public static final RowMapper LAYER_MAPPER = new RowMapper() { - public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { - Layer.Builder builder = Layer.newBuilder().setId(SqlUtil.getString(rs, "pk_layer")) - .setParentId(SqlUtil.getString(rs, "pk_job")).setChunkSize(rs.getInt("int_chunk_size")) - .setDispatchOrder(rs.getInt("int_dispatch_order")) - .setName(SqlUtil.getString(rs, "str_name")).setCommand(SqlUtil.getString(rs, "str_cmd")) - .setRange(SqlUtil.getString(rs, "str_range")) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_cores_min"))) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_cores_max"))) - .setIsThreadable(rs.getBoolean("b_threadable")).setMinMemory(rs.getLong("int_mem_min")) - .setMinGpus(rs.getInt("int_gpus_min")).setMaxGpus(rs.getInt("int_gpus_max")) - .setMinGpuMemory(rs.getLong("int_gpu_mem_min")) - .setType(LayerType.valueOf(SqlUtil.getString(rs, "str_type"))) - .addAllTags( - Sets.newHashSet(SqlUtil.getString(rs, "str_tags").replaceAll(" ", "").split("\\|"))) - .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))) - .addAllLimits(Arrays.asList(SqlUtil.getString(rs, "str_limit_names").split(","))) - .setMemoryOptimizerEnabled(rs.getBoolean("b_optimize")) - .setTimeout(rs.getInt("int_timeout")).setTimeoutLlu(rs.getInt("int_timeout_llu")); - - LayerStats.Builder statsBuilder = LayerStats.newBuilder() - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")).setMaxRss(rs.getLong("int_max_rss")) - .setTotalFrames(rs.getInt("int_total_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setSucceededFrames(rs.getInt("int_succeeded_count")) - .setEatenFrames(rs.getInt("int_eaten_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) - .setFailedCoreSec(rs.getLong("int_core_time_fail")) - 
.setRenderedCoreSec(rs.getLong("int_core_time_success")) - .setTotalCoreSec(rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) - .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) - .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) - .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) - .setRenderedFrameCount(rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setHighFrameSec(rs.getInt("int_clock_time_high")) - .setLowFrameSec(rs.getInt("int_clock_time_low")); - - if (statsBuilder.getRenderedFrameCount() > 0) { - statsBuilder.setAvgFrameSec( - (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); - statsBuilder.setAvgCoreSec( - (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); - statsBuilder.setRemainingCoreSec( - (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); - } else { - statsBuilder.setAvgFrameSec(0); - statsBuilder.setAvgCoreSec(0); - statsBuilder.setRemainingCoreSec(0); - } - builder.setLayerStats(statsBuilder.build()); - return builder.build(); - } - }; - - private static final RowMapper LIMIT_NAME_MAPPER = new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_name"); - } - }; - - public static final RowMapper SUBSCRIPTION_MAPPER = new RowMapper() { - public Subscription mapRow(ResultSet rs, int rowNum) throws SQLException { - return Subscription.newBuilder().setId(SqlUtil.getString(rs, "pk_subscription")) - .setBurst(rs.getInt("int_burst")).setName(rs.getString("name")) - .setReservedCores(rs.getInt("int_cores")).setReservedGpus(rs.getInt("int_gpus")) - .setSize(rs.getInt("int_size")).setAllocationName(rs.getString("alloc_name")) - .setShowName(rs.getString("show_name")).setFacility(rs.getString("facility_name")) - .build(); - } - }; - - public static final RowMapper UPDATED_FRAME_MAPPER = new RowMapper() { - public UpdatedFrame mapRow(ResultSet rs, int rowNum) throws SQLException { - UpdatedFrame.Builder builder = UpdatedFrame.newBuilder() - .setId(SqlUtil.getString(rs, "pk_frame")).setExitStatus(rs.getInt("int_exit_status")) - .setMaxRss(rs.getInt("int_mem_max_used")).setRetryCount(rs.getInt("int_retries")) - .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setUsedMemory(rs.getInt("int_mem_used")); - - if (SqlUtil.getString(rs, "str_host") != null) { - builder.setLastResource( - String.format(Locale.ROOT, "%s/%2.2f/%d", SqlUtil.getString(rs, "str_host"), - Convert.coreUnitsToCores(rs.getInt("int_cores")), rs.getInt("int_gpus"))); - } else { - builder.setLastResource(""); - } - - java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); - if (ts_started != null) { - builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); - } else { - builder.setStartTime(0); - } - java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); - if (ts_stopped != null) { - builder.setStopTime((int) (ts_stopped.getTime() / 1000)); - } else { - builder.setStopTime(0); - } - - if (rs.getString("pk_frame_override") != null) { - String[] rgb = rs.getString("str_rgb").split(","); - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(FrameState.valueOf(rs.getString("str_frame_state"))) - .setText(rs.getString("str_override_text")) - .setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(Integer.parseInt(rgb[0])) - 
.setGreen(Integer.parseInt(rgb[1])).setBlue(Integer.parseInt(rgb[2])).build()) - .build(); - builder.setFrameStateDisplayOverride(override); - } - - return builder.build(); - } - }; - - public static final RowMapper FRAME_MAPPER = new RowMapper() { - public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { - Frame.Builder builder = Frame.newBuilder().setId(SqlUtil.getString(rs, "pk_frame")) - .setName(SqlUtil.getString(rs, "str_name")).setExitStatus(rs.getInt("int_exit_status")) - .setMaxRss(rs.getLong("int_mem_max_used")).setNumber(rs.getInt("int_number")) - .setDispatchOrder(rs.getInt("int_dispatch_order")).setRetryCount(rs.getInt("int_retries")) - .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setLayerName(SqlUtil.getString(rs, "layer_name")) - .setUsedMemory(rs.getLong("int_mem_used")) - .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) - .setCheckpointState( - CheckpointState.valueOf(SqlUtil.getString(rs, "str_checkpoint_state"))) - .setCheckpointCount(rs.getInt("int_checkpoint_count")); - - if (SqlUtil.getString(rs, "str_host") != null) { - builder.setLastResource(CueUtil.buildProcName(SqlUtil.getString(rs, "str_host"), - rs.getInt("int_cores"), rs.getInt("int_gpus"))); - } else { - builder.setLastResource(""); - } - - java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); - if (ts_started != null) { - builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); - } else { - builder.setStartTime(0); - } - java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); - if (ts_stopped != null) { - builder.setStopTime((int) (ts_stopped.getTime() / 1000)); - } else { - builder.setStopTime(0); - } - java.sql.Timestamp ts_llu = rs.getTimestamp("ts_llu"); - if (ts_llu != null) { - builder.setLluTime((int) (ts_llu.getTime() / 1000)); - } else { - builder.setLluTime(0); - } - - builder.setTotalCoreTime(rs.getInt("int_total_past_core_time")); - builder.setTotalGpuTime(rs.getInt("int_total_past_gpu_time")); - if (builder.getState() == FrameState.RUNNING) { - builder.setTotalCoreTime(builder.getTotalCoreTime() - + (int) (System.currentTimeMillis() / 1000 - builder.getStartTime()) - * rs.getInt("int_cores") / 100); - builder.setTotalGpuTime(builder.getTotalGpuTime() - + (int) (System.currentTimeMillis() / 1000 - builder.getStartTime()) - * rs.getInt("int_gpus")); - } - - if (rs.getString("pk_frame_override") != null) { - String[] rgb = rs.getString("str_rgb").split(","); - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(FrameState.valueOf(rs.getString("str_frame_state"))) - .setText(rs.getString("str_override_text")) - .setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(Integer.parseInt(rgb[0])) - .setGreen(Integer.parseInt(rgb[1])).setBlue(Integer.parseInt(rgb[2])).build()) - .build(); - builder.setFrameStateDisplayOverride(override); - } - - return builder.build(); - } - }; - - private static final RowMapper SERVICE_MAPPER = new RowMapper() { - public Service mapRow(ResultSet rs, int rowNum) throws SQLException { - return Service.newBuilder().setId(SqlUtil.getString(rs, "pk_service")) - .setName(SqlUtil.getString(rs, "str_name")).setThreadable(rs.getBoolean("b_threadable")) - .setMinCores(rs.getInt("int_cores_min")).setMaxCores(rs.getInt("int_cores_max")) - .setMinMemory(rs.getInt("int_mem_min")).setMinGpus(rs.getInt("int_gpus_min")) - .setMaxGpus(rs.getInt("int_gpus_max")).setMinGpuMemory(rs.getInt("int_gpu_mem_min")) - 
.addAllTags( - Lists.newArrayList(ServiceDaoJdbc.splitTags(SqlUtil.getString(rs, "str_tags")))) - .setTimeout(rs.getInt("int_timeout")).setTimeoutLlu(rs.getInt("int_timeout_llu")) - .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")).build(); - } - }; - - private static final RowMapper SERVICE_OVERRIDE_MAPPER = - new RowMapper() { - public ServiceOverride mapRow(ResultSet rs, int rowNum) throws SQLException { - Service data = Service.newBuilder().setId(SqlUtil.getString(rs, "pk_show_service")) - .setName(SqlUtil.getString(rs, "str_name")) - .setThreadable(rs.getBoolean("b_threadable")).setMinCores(rs.getInt("int_cores_min")) - .setMaxCores(rs.getInt("int_cores_max")).setMinMemory(rs.getInt("int_mem_min")) - .setMinGpus(rs.getInt("int_gpus_min")).setMaxGpus(rs.getInt("int_gpus_max")) - .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) - .addAllTags( - Lists.newArrayList(ServiceDaoJdbc.splitTags(SqlUtil.getString(rs, "str_tags")))) - .setTimeout(rs.getInt("int_timeout")).setTimeoutLlu(rs.getInt("int_timeout_llu")) - .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")).build(); - return ServiceOverride.newBuilder().setId(SqlUtil.getString(rs, "pk_show_service")) - .setData(data).build(); + }; + + private static final RowMapper LIMIT_NAME_MAPPER = new RowMapper() { + public String mapRow(ResultSet rs, int rowNum) throws SQLException { + return rs.getString("str_name"); } - }; - - public static final RowMapper SHOW_MAPPER = new RowMapper() { - public Show mapRow(ResultSet rs, int rowNum) throws SQLException { - ShowStats stats = ShowStats.newBuilder().setPendingFrames(rs.getInt("int_pending_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setCreatedFrameCount(rs.getLong("int_frame_insert_count")) - .setCreatedJobCount(rs.getLong("int_job_insert_count")) - .setRenderedFrameCount(rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setReservedGpus(rs.getInt("int_gpus")).setPendingJobs(rs.getInt("int_job_count")) - .build(); - return Show.newBuilder().setId(SqlUtil.getString(rs, "pk_show")) - .setName(SqlUtil.getString(rs, "str_name")).setActive(rs.getBoolean("b_active")) - .setDefaultMaxCores(Convert.coreUnitsToCores(rs.getInt("int_default_max_cores"))) - .setDefaultMinCores(Convert.coreUnitsToCores(rs.getInt("int_default_min_cores"))) - .setDefaultMaxGpus(rs.getInt("int_default_max_gpus")) - .setDefaultMinGpus(rs.getInt("int_default_min_gpus")) - .setBookingEnabled(rs.getBoolean("b_booking_enabled")) - .setDispatchEnabled(rs.getBoolean("b_dispatch_enabled")) - .setCommentEmail(SqlUtil.getString(rs, "str_comment_email")).setShowStats(stats).build(); - } - }; - /* - * Queries - */ - - private static final String GET_JOB_NAMES = "SELECT " + "job.str_name " + "FROM " + "job," - + "show " + "WHERE " + "job.pk_show = show.pk_show " + "AND " + "job.str_state = 'PENDING' "; - - private static final String GET_HOST_COMMENTS = "SELECT " + "* " + "FROM " + "comments " - + "WHERE " + "pk_host=? 
" + "ORDER BY " + "ts_created ASC"; - - private static final String GET_FILTER = "SELECT " + "filter.* " + "FROM " + "filter," + "show " - + "WHERE " + "filter.pk_show = show.pk_show"; - - private static final String GET_FRAME = "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," - + "frame.str_name," + "frame.int_number," + "frame.int_dispatch_order," + "frame.ts_started," - + "frame.ts_stopped," + "frame.ts_llu," + "frame.int_retries," + "frame.str_state," - + "frame.str_host," + "frame.int_cores," + "frame.int_gpus," + "frame.int_mem_max_used," - + "frame.int_mem_used, " + "frame.int_mem_reserved, " + "frame.int_gpu_mem_reserved, " - + "frame.str_checkpoint_state," + "frame.int_checkpoint_count," - + "frame.int_total_past_core_time," + "frame.int_total_past_gpu_time," - + "layer.str_name AS layer_name," + "job.str_name AS job_name," - + "frame_state_display_overrides.* " + "FROM " + "job, " + "layer, " + "frame " - + "LEFT JOIN frame_state_display_overrides ON " - + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " - + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " - + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job"; - - private static final String FIND_FRAME = GET_FRAME + " " + "AND " + "job.str_state='PENDING' " - + "AND " + "job.str_name=? " + "AND " + "layer.str_name=? " + "AND " + "frame.int_number=?"; - - private static final String GET_PROC = "SELECT " + "host.str_name AS host_name, " - + "job.str_name AS job_name, " + "job.str_log_dir, " + "folder.str_name as folder_name, " - + "show.str_name AS show_name, " + "frame.str_name AS frame_name, " + "layer.str_services, " - + "proc.pk_proc, " + "proc.pk_host, " + "proc.int_cores_reserved, " - + "proc.int_mem_reserved, " + "proc.int_mem_used, " + "proc.int_mem_max_used, " - + "proc.int_gpus_reserved, " + "proc.int_gpu_mem_reserved, " + "proc.int_gpu_mem_used, " - + "proc.int_gpu_mem_max_used, " + "proc.ts_ping, " + "proc.ts_booked, " - + "proc.ts_dispatched, " + "proc.b_unbooked, " + "proc.bytea_children, " - + "redirect.str_name AS str_redirect " + "FROM proc " - + "JOIN host ON proc.pk_host = host.pk_host " - + "JOIN alloc ON host.pk_alloc = alloc.pk_alloc " - + "JOIN frame ON proc.pk_frame = frame.pk_frame " - + "JOIN layer ON proc.pk_layer = layer.pk_layer " + "JOIN job ON proc.pk_job = job.pk_job " - + "JOIN folder ON job.pk_folder = folder.pk_folder " - + "JOIN show ON proc.pk_show = show.pk_show " - + "LEFT JOIN redirect ON proc.pk_proc = redirect.pk_proc " + "WHERE true "; - - private static final String GET_JOB_COMMENTS = "SELECT " + "* " + "FROM " + "comments " + "WHERE " - + "pk_job=? 
" + "ORDER BY " + "ts_created ASC"; - - private static final String GET_UPDATED_FRAME = - "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + "frame.ts_started," - + "frame.ts_stopped," + "frame.int_retries," + "frame.str_state," + "frame.str_host," - + "frame.int_cores," + "frame.int_gpus," + "frame.ts_llu," - + "COALESCE(proc.int_mem_max_used, frame.int_mem_max_used) AS int_mem_max_used," - + "COALESCE(proc.int_mem_used, frame.int_mem_used) AS int_mem_used," - + "frame_state_display_overrides.* " + "FROM " + "job, " + "layer," + "frame " - + "LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " - + "LEFT JOIN frame_state_display_overrides ON " - + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " - + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " - + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job"; - - private static final String GET_ALLOCATION = "SELECT " + "alloc.pk_alloc, " + "alloc.str_name, " - + "alloc.str_tag, " + "alloc.b_billable," + "facility.str_name AS facility_name," - + "vs_alloc_usage.int_cores," + "vs_alloc_usage.int_idle_cores," - + "vs_alloc_usage.int_running_cores," + "vs_alloc_usage.int_available_cores," - + "vs_alloc_usage.int_locked_cores," + "vs_alloc_usage.int_gpus," - + "vs_alloc_usage.int_idle_gpus," + "vs_alloc_usage.int_running_gpus," - + "vs_alloc_usage.int_available_gpus," + "vs_alloc_usage.int_locked_gpus," - + "vs_alloc_usage.int_hosts," + "vs_alloc_usage.int_locked_hosts," - + "vs_alloc_usage.int_down_hosts " + "FROM " + "alloc, " + "facility, " + "vs_alloc_usage " - + "WHERE " + "alloc.pk_alloc = vs_alloc_usage.pk_alloc " + "AND " - + "alloc.pk_facility = facility.pk_facility " + "AND " + "alloc.b_enabled = true"; - - private static final String GET_MATCHER = "SELECT " + "filter.pk_show," + "matcher.* " + "FROM " - + "filter," + "matcher " + "WHERE " + "filter.pk_filter = matcher.pk_filter"; - - private static final String GET_DEPARTMENT = "SELECT " + "dept.str_name AS str_dept," - + "show.str_name || '.' || dept.str_name AS str_name, " + "pk_point," + "str_ti_task," - + "int_cores," + "int_min_cores," + "int_gpus," + "int_min_gpus," + "b_managed " + "FROM " - + "point," + "dept," + "show " + "WHERE " + "point.pk_show = show.pk_show " + "AND " - + "point.pk_dept = dept.pk_dept " + "AND " + "point.pk_show = ? " + "AND " - + "dept.str_name = ?"; - - private static final String GET_DEPARTMENTS = "SELECT " + "dept.str_name AS str_dept," - + "show.str_name || '.' || dept.str_name AS str_name, " + "pk_point," + "str_ti_task," - + "int_cores," + "int_min_cores," + "int_gpus," + "int_min_gpus," + "b_managed " + "FROM " - + "point," + "dept," + "show " + "WHERE " + "point.pk_show = show.pk_show " + "AND " - + "point.pk_dept = dept.pk_dept " + "AND " + "point.pk_show = ? 
"; - - private static final String QUERY_FOR_OWNER = - "SELECT " + "owner.pk_owner," + "owner.str_username," + "show.str_name AS str_show, " - + "(SELECT COUNT(1) FROM deed WHERE deed.pk_owner = owner.pk_owner) " + " AS host_count " - + "FROM " + "owner, " + "show " + "WHERE " + "owner.pk_show = show.pk_show"; - - private static final String QUERY_FOR_RENDER_PART = "SELECT " + "host_local.pk_host_local," - + "host_local.int_cores_idle," + "host_local.int_cores_max," + "host_local.int_gpus_idle," - + "host_local.int_gpus_max," + "host_local.int_threads," + "host_local.int_mem_idle," - + "host_local.int_mem_max," + "host_local.int_gpu_mem_idle," + "host_local.int_gpu_mem_max," - + "host_local.str_type," - + "(SELECT str_name FROM host WHERE host.pk_host = host_local.pk_host) " + "AS str_host_name," - + "(SELECT str_name FROM job WHERE job.pk_job = host_local.pk_job) " + "AS str_job_name," - + "(SELECT str_name FROM layer WHERE layer.pk_layer = host_local.pk_layer) " - + "AS str_layer_name," - + "(SELECT str_name FROM frame WHERE frame.pk_frame = host_local.pk_frame) " - + "AS str_frame_name " + "FROM " + "host_local "; - - private static final String QUERY_FOR_FACILITY = - "SELECT " + "facility.pk_facility," + "facility.str_name " + "FROM " + "facility "; - - private static final String QUERY_FOR_LIMIT = "SELECT " + "limit_record.pk_limit_record, " - + "limit_record.str_name, " + "limit_record.int_max_value, " - + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + "limit_record " - + "LEFT JOIN " + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " - + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " - + "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; - - private static final String GET_LIMIT_FROM_LAYER_ID = "SELECT " + "limit_record.pk_limit_record, " - + "limit_record.str_name, " + "limit_record.int_max_value, " - + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + "limit_record " - + "LEFT JOIN " + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " - + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " - + "layer_stat ON layer_stat.pk_layer = layer.pk_layer " + "WHERE " - + "layer_limit.pk_layer = ? 
" + "GROUP BY " + "limit_record.str_name, " - + "limit_record.pk_limit_record, " + "limit_record.int_max_value"; - - public static final String GET_GROUPS = "SELECT " + "show.pk_show, " - + "show.str_name AS str_show," + "dept.str_name AS str_dept," + "folder.pk_folder," - + "folder.pk_parent_folder," + "folder.str_name AS group_name," + "folder.int_job_priority," - + "folder.int_job_min_cores," + "folder.int_job_max_cores," + "folder_resource.int_min_cores," - + "folder_resource.int_max_cores," + "folder.int_job_min_gpus," + "folder.int_job_max_gpus," - + "folder_resource.int_min_gpus," + "folder_resource.int_max_gpus," + "folder.b_default, " - + "folder_level.int_level, " + "c.int_waiting_count, " + "c.int_depend_count, " - + "c.int_running_count," + "c.int_dead_count," + "c.int_job_count," + "c.int_cores," - + "c.int_gpus " + "FROM " + "folder, " + "folder_level," + "folder_resource, " - + "vs_folder_counts c, " + "show," + "dept " + "WHERE " + "show.pk_show = folder.pk_show " - + "AND " + "folder.pk_folder = folder_level.pk_folder " + "AND " - + "folder.pk_folder = folder_resource.pk_folder " + "AND " + "folder.pk_folder = c.pk_folder " - + "AND " + "folder.pk_dept = dept.pk_dept "; - - private static final String GET_ACTION = "SELECT " + "filter.pk_show," + "action.* " + "FROM " - + "filter," + "action " + "WHERE " + "filter.pk_filter = action.pk_filter "; - - private static final String GET_JOB = "SELECT " + "job.pk_job," + "job.str_log_dir," - + "job_resource.int_max_cores," + "job_resource.int_min_cores," + "job_resource.int_max_gpus," - + "job_resource.int_min_gpus," + "job.str_name," + "job.str_shot," + "job.str_state," - + "job.int_uid," + "job.str_user," + "job.b_paused," + "job.ts_started," + "job.ts_stopped," - + "job.b_comment," + "job.b_autoeat," + "job.str_os," + "job_resource.int_priority," - + "job.int_frame_count, " + "job.int_layer_count, " + "show.str_name as str_show," - + "show.pk_show as id_show," + "facility.str_name AS facility_name," - + "folder.str_name AS group_name," + "job_stat.int_waiting_count, " - + "job_stat.int_running_count, " + "job_stat.int_dead_count, " + "job_stat.int_eaten_count," - + "job_stat.int_depend_count, " + "job_stat.int_succeeded_count, " - + "job_usage.int_core_time_success, " + "job_usage.int_core_time_fail, " - + "job_usage.int_gpu_time_success, " + "job_usage.int_gpu_time_fail, " - + "job_usage.int_frame_success_count, " + "job_usage.int_frame_fail_count, " - + "job_usage.int_clock_time_high," + "job_usage.int_clock_time_success," - + "job_mem.int_max_rss," - + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores," - + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus " + "FROM " + "job," - + "folder," + "show," + "facility," + "job_stat," + "job_resource, " + "job_mem, " - + "job_usage " + "WHERE " + "job.pk_show = show.pk_show " + "AND " - + "job.pk_folder = folder.pk_folder " + "AND " + "job.pk_facility = facility.pk_facility " - + "AND " + "job.pk_job = job_stat.pk_job " + "AND " + "job.pk_job = job_resource.pk_job " - + "AND " + "job.pk_job = job_mem.pk_job " + "AND " + "job.pk_job = job_usage.pk_job "; - - private static final String GET_LAYER = "SELECT " + "layer.*," + "layer_stat.int_total_count," - + "layer_stat.int_waiting_count," + "layer_stat.int_running_count," - + "layer_stat.int_dead_count," + "layer_stat.int_depend_count," - + "layer_stat.int_eaten_count," + "layer_stat.int_succeeded_count," - + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail, " - + 
"layer_usage.int_gpu_time_success," + "layer_usage.int_gpu_time_fail, " - + "layer_usage.int_frame_success_count, " + "layer_usage.int_frame_fail_count, " - + "layer_usage.int_clock_time_low, " + "layer_usage.int_clock_time_high," - + "layer_usage.int_clock_time_success," + "layer_usage.int_clock_time_fail," - + "layer_mem.int_max_rss," + "layer_resource.int_cores," + "layer_resource.int_gpus " - + "FROM " + "layer, " + "job," + "layer_stat, " + "layer_resource, " + "layer_usage, " - + "layer_mem " + "WHERE " + "layer.pk_job = job.pk_job " + "AND " - + "layer.pk_layer = layer_stat.pk_layer " + "AND " - + "layer.pk_layer = layer_resource.pk_layer " + "AND " - + "layer.pk_layer = layer_usage.pk_layer " + "AND " + "layer.pk_layer = layer_mem.pk_layer"; - - private static final String GET_LAYER_WITH_LIMITS = "SELECT " + "layer.*, " - + "layer_stat.int_total_count, " + "layer_stat.int_waiting_count, " - + "layer_stat.int_running_count, " + "layer_stat.int_dead_count, " - + "layer_stat.int_depend_count, " + "layer_stat.int_eaten_count, " - + "layer_stat.int_succeeded_count, " + "layer_usage.int_core_time_success, " - + "layer_usage.int_core_time_fail, " + "layer_usage.int_gpu_time_success, " - + "layer_usage.int_gpu_time_fail, " + "layer_usage.int_frame_success_count, " - + "layer_usage.int_frame_fail_count, " + "layer_usage.int_clock_time_low, " - + "layer_usage.int_clock_time_high, " + "layer_usage.int_clock_time_success, " - + "layer_usage.int_clock_time_fail, " + "layer_mem.int_max_rss, " - + "layer_resource.int_cores, " + "layer_resource.int_gpus, " + "limit_names.str_limit_names " - + "FROM " + "layer " + "JOIN " + "job ON layer.pk_job = job.pk_job " + "JOIN " - + "layer_stat ON layer.pk_layer = layer_stat.pk_layer " + "JOIN " - + "layer_resource ON layer.pk_layer = layer_resource.pk_layer " + "JOIN " - + "layer_usage ON layer.pk_layer = layer_usage.pk_layer " + "JOIN " - + "layer_mem ON layer.pk_layer = layer_mem.pk_layer " + "LEFT JOIN " + "(" + "SELECT " - + "layer_limit.pk_layer, " + "string_agg(limit_record.str_name, ',') AS str_limit_names " - + "FROM " + "limit_record, " + "layer_limit " + "WHERE " - + "layer_limit.pk_limit_record = limit_record.pk_limit_record " + "GROUP BY " - + "layer_limit.pk_layer) AS limit_names " + "ON layer.pk_layer = limit_names.pk_layer "; - - private static final String GET_LIMIT_NAMES = "SELECT " + "limit_record.str_name " + "FROM " - + "layer_limit, " + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? 
" + "AND " - + "limit_record.pk_limit_record = layer_limit.pk_limit_record "; - - private static final String GET_SHOW = "SELECT " + "show.pk_show," + "show.str_name," - + "show.b_paused," + "show.int_default_min_cores," + "show.int_default_max_cores," - + "show.int_default_min_gpus," + "show.int_default_max_gpus," + "show.b_booking_enabled," - + "show.b_dispatch_enabled," + "show.b_active," + "show.str_comment_email," - + "show_stats.int_frame_insert_count," + "show_stats.int_job_insert_count," - + "show_stats.int_frame_success_count," + "show_stats.int_frame_fail_count," - + "COALESCE(vs_show_stat.int_pending_count,0) AS int_pending_count," - + "COALESCE(vs_show_stat.int_running_count,0) AS int_running_count," - + "COALESCE(vs_show_stat.int_dead_count,0) AS int_dead_count," - + "COALESCE(vs_show_resource.int_cores,0) AS int_cores, " - + "COALESCE(vs_show_resource.int_gpus,0) AS int_gpus, " - + "COALESCE(vs_show_stat.int_job_count,0) AS int_job_count " + "FROM " + "show " - + "JOIN show_stats ON (show.pk_show = show_stats.pk_show) " - + "LEFT JOIN vs_show_stat ON (vs_show_stat.pk_show = show.pk_show) " - + "LEFT JOIN vs_show_resource ON (vs_show_resource.pk_show=show.pk_show) " + "WHERE " - + "1 = 1 "; - - private static final String GET_SERVICE = "SELECT " + "service.pk_service," + "service.str_name," - + "service.b_threadable," + "service.int_cores_min," + "service.int_cores_max," - + "service.int_mem_min," + "service.int_gpus_min," + "service.int_gpus_max," - + "service.int_gpu_mem_min," + "service.str_tags," + "service.int_timeout," - + "service.int_timeout_llu," + "service.int_min_memory_increase " + "FROM " + "service "; - - private static final String GET_SERVICE_OVERRIDE = "SELECT " + "show_service.pk_show_service," - + "show_service.str_name," + "show_service.b_threadable," + "show_service.int_cores_min," - + "show_service.int_cores_max," + "show_service.int_mem_min," + "show_service.int_gpus_min," - + "show_service.int_gpus_max," + "show_service.int_gpu_mem_min," + "show_service.str_tags," - + "show_service.int_timeout," + "show_service.int_timeout_llu," - + "show_service.int_min_memory_increase " + "FROM " + "show_service, " + "show " + "WHERE " - + "show_service.pk_show = show.pk_show "; - - private static final String GET_TASK = "SELECT " + "task.pk_task," + "task.str_shot," - + "task.int_min_cores + task.int_adjust_cores AS int_min_cores, " + "task.int_adjust_cores, " - + "task.int_min_gpus + task.int_adjust_gpus AS int_min_gpus, " + "task.int_adjust_gpus, " - + "dept.str_name AS str_dept " + "FROM " + "task," + "dept, " + "point " + "WHERE " - + "task.pk_point = point.pk_point " + "AND " + "point.pk_dept = dept.pk_dept "; - - private static final String GET_HOST = "SELECT " + "host.pk_host, " - + "host.str_name AS host_name," + "host_stat.str_state AS host_state," + "host.b_nimby," - + "host_stat.ts_booted," + "host_stat.ts_ping," + "host.int_cores," + "host.int_cores_idle," - + "host.int_mem," + "host.int_mem_idle," + "host.int_gpus," + "host.int_gpus_idle," - + "host.int_gpu_mem," + "host.int_gpu_mem_idle," + "host.str_tags," + "host.str_lock_state," - + "host.b_comment," + "host.int_thread_mode," + "host_stat.str_os," - + "host_stat.int_mem_total," + "host_stat.int_mem_free," + "host_stat.int_swap_total," - + "host_stat.int_swap_free," + "host_stat.int_mcp_total," + "host_stat.int_mcp_free," - + "host_stat.int_gpu_mem_total," + "host_stat.int_gpu_mem_free," + "host_stat.int_load, " - + "alloc.str_name AS alloc_name " + "FROM " + "alloc," + "facility, " + "host_stat," + 
"host " - + "WHERE " + "host.pk_alloc = alloc.pk_alloc " + "AND " - + "facility.pk_facility = alloc.pk_facility " + "AND " + "host.pk_host = host_stat.pk_host "; - - private static final String GET_DEPEND = "SELECT " + "depend.pk_depend, " + "depend.str_type, " - + "depend.b_active, " + "depend.b_any, " + "depend.str_target, " - + "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_on) AS depend_on_job, " - + "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_er) AS depend_er_job, " - + "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_on) AS depend_on_layer, " - + "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_er) AS depend_er_layer, " - + "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_on) AS depend_on_frame, " - + "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_er) AS depend_er_frame " - + "FROM " + "depend "; - - private static final String GET_SUBSCRIPTION = "SELECT " + "subscription.pk_subscription, " - + "(alloc.str_name || '.' || show.str_name) AS name, " + "subscription.int_burst, " - + "subscription.int_size, " + "subscription.int_cores, " + "subscription.int_gpus, " - + "show.str_name AS show_name, " + "alloc.str_name AS alloc_name, " - + "facility.str_name AS facility_name " + "FROM " + "show, " + "alloc, " + "facility," - + "subscription " + "WHERE " + "subscription.pk_show = show.pk_show " + "AND " - + "subscription.pk_alloc = alloc.pk_alloc " + "AND " - + "alloc.pk_facility = facility.pk_facility "; - - private static final String GET_PENDING_JOBS = GET_JOB + "AND " + "job.str_state = 'PENDING' "; - - private static final String GET_FRAMES_CRITERIA = - - "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + "frame.str_name," - + "frame.int_number," + "frame.int_dispatch_order," + "frame.ts_started," - + "frame.ts_stopped," + "frame.ts_llu," + "frame.int_retries," + "frame.str_state," - + "frame.str_host," + "frame.int_cores," + "frame.int_mem_max_used," - + "frame.int_mem_used, " + "frame.int_mem_reserved, " + "frame.int_gpus," - + "frame.int_gpu_mem_max_used, " + "frame.int_gpu_mem_used, " - + "frame.int_gpu_mem_reserved, " + "frame.str_checkpoint_state," - + "frame.int_checkpoint_count," + "frame.int_total_past_core_time," - + "frame.int_total_past_gpu_time," + "layer.str_name AS layer_name," - + "job.str_name AS job_name, " + "frame_state_display_overrides.*, " - + "ROW_NUMBER() OVER " - + "(ORDER BY frame.int_dispatch_order ASC, layer.int_dispatch_order ASC) AS row_number " - + "FROM " + "job, " + "layer," + "frame " + "LEFT JOIN frame_state_display_overrides ON " - + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " - + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " - + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job "; - - private static final String QUERY_FOR_DEED = "SELECT " + "host.str_name AS str_host," - + "show.str_name AS str_show," + "owner.str_username," + "deed.pk_deed " + "FROM " + "deed," - + "owner," + "host," + "show " + "WHERE " + "deed.pk_host = host.pk_host " + "AND " - + "deed.pk_owner = owner.pk_owner " + "AND " + "owner.pk_show = show.pk_show "; - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } - 
- public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; - } + }; + + public static final RowMapper SUBSCRIPTION_MAPPER = + new RowMapper() { + public Subscription mapRow(ResultSet rs, int rowNum) throws SQLException { + return Subscription.newBuilder().setId(SqlUtil.getString(rs, "pk_subscription")) + .setBurst(rs.getInt("int_burst")).setName(rs.getString("name")) + .setReservedCores(rs.getInt("int_cores")) + .setReservedGpus(rs.getInt("int_gpus")).setSize(rs.getInt("int_size")) + .setAllocationName(rs.getString("alloc_name")) + .setShowName(rs.getString("show_name")) + .setFacility(rs.getString("facility_name")).build(); + } + }; + + public static final RowMapper UPDATED_FRAME_MAPPER = + new RowMapper() { + public UpdatedFrame mapRow(ResultSet rs, int rowNum) throws SQLException { + UpdatedFrame.Builder builder = + UpdatedFrame.newBuilder().setId(SqlUtil.getString(rs, "pk_frame")) + .setExitStatus(rs.getInt("int_exit_status")) + .setMaxRss(rs.getInt("int_mem_max_used")) + .setRetryCount(rs.getInt("int_retries")) + .setState( + FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) + .setUsedMemory(rs.getInt("int_mem_used")); + + if (SqlUtil.getString(rs, "str_host") != null) { + builder.setLastResource(String.format(Locale.ROOT, "%s/%2.2f/%d", + SqlUtil.getString(rs, "str_host"), + Convert.coreUnitsToCores(rs.getInt("int_cores")), + rs.getInt("int_gpus"))); + } else { + builder.setLastResource(""); + } + + java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); + if (ts_started != null) { + builder.setStartTime( + (int) (rs.getTimestamp("ts_started").getTime() / 1000)); + } else { + builder.setStartTime(0); + } + java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); + if (ts_stopped != null) { + builder.setStopTime((int) (ts_stopped.getTime() / 1000)); + } else { + builder.setStopTime(0); + } + + if (rs.getString("pk_frame_override") != null) { + String[] rgb = rs.getString("str_rgb").split(","); + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() + .setState(FrameState.valueOf(rs.getString("str_frame_state"))) + .setText(rs.getString("str_override_text")) + .setColor(FrameStateDisplayOverride.RGB.newBuilder() + .setRed(Integer.parseInt(rgb[0])) + .setGreen(Integer.parseInt(rgb[1])) + .setBlue(Integer.parseInt(rgb[2])).build()) + .build(); + builder.setFrameStateDisplayOverride(override); + } + + return builder.build(); + } + }; + + public static final RowMapper FRAME_MAPPER = new RowMapper() { + public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { + Frame.Builder builder = Frame.newBuilder().setId(SqlUtil.getString(rs, "pk_frame")) + .setName(SqlUtil.getString(rs, "str_name")) + .setExitStatus(rs.getInt("int_exit_status")) + .setMaxRss(rs.getLong("int_mem_max_used")).setNumber(rs.getInt("int_number")) + .setDispatchOrder(rs.getInt("int_dispatch_order")) + .setRetryCount(rs.getInt("int_retries")) + .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) + .setLayerName(SqlUtil.getString(rs, "layer_name")) + .setUsedMemory(rs.getLong("int_mem_used")) + .setReservedMemory(rs.getLong("int_mem_reserved")) + .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) + .setCheckpointState( + CheckpointState.valueOf(SqlUtil.getString(rs, "str_checkpoint_state"))) + .setCheckpointCount(rs.getInt("int_checkpoint_count")); + + if (SqlUtil.getString(rs, "str_host") != null) { + builder.setLastResource(CueUtil.buildProcName(SqlUtil.getString(rs, "str_host"), + 
rs.getInt("int_cores"), rs.getInt("int_gpus"))); + } else { + builder.setLastResource(""); + } + + java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); + if (ts_started != null) { + builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); + } else { + builder.setStartTime(0); + } + java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); + if (ts_stopped != null) { + builder.setStopTime((int) (ts_stopped.getTime() / 1000)); + } else { + builder.setStopTime(0); + } + java.sql.Timestamp ts_llu = rs.getTimestamp("ts_llu"); + if (ts_llu != null) { + builder.setLluTime((int) (ts_llu.getTime() / 1000)); + } else { + builder.setLluTime(0); + } + + builder.setTotalCoreTime(rs.getInt("int_total_past_core_time")); + builder.setTotalGpuTime(rs.getInt("int_total_past_gpu_time")); + if (builder.getState() == FrameState.RUNNING) { + builder.setTotalCoreTime(builder.getTotalCoreTime() + + (int) (System.currentTimeMillis() / 1000 - builder.getStartTime()) + * rs.getInt("int_cores") / 100); + builder.setTotalGpuTime(builder.getTotalGpuTime() + + (int) (System.currentTimeMillis() / 1000 - builder.getStartTime()) + * rs.getInt("int_gpus")); + } + + if (rs.getString("pk_frame_override") != null) { + String[] rgb = rs.getString("str_rgb").split(","); + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() + .setState(FrameState.valueOf(rs.getString("str_frame_state"))) + .setText(rs.getString("str_override_text")) + .setColor(FrameStateDisplayOverride.RGB.newBuilder() + .setRed(Integer.parseInt(rgb[0])).setGreen(Integer.parseInt(rgb[1])) + .setBlue(Integer.parseInt(rgb[2])).build()) + .build(); + builder.setFrameStateDisplayOverride(override); + } + + return builder.build(); + } + }; + + private static final RowMapper SERVICE_MAPPER = new RowMapper() { + public Service mapRow(ResultSet rs, int rowNum) throws SQLException { + return Service.newBuilder().setId(SqlUtil.getString(rs, "pk_service")) + .setName(SqlUtil.getString(rs, "str_name")) + .setThreadable(rs.getBoolean("b_threadable")) + .setMinCores(rs.getInt("int_cores_min")).setMaxCores(rs.getInt("int_cores_max")) + .setMinMemory(rs.getInt("int_mem_min")).setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) + .addAllTags(Lists.newArrayList( + ServiceDaoJdbc.splitTags(SqlUtil.getString(rs, "str_tags")))) + .setTimeout(rs.getInt("int_timeout")) + .setTimeoutLlu(rs.getInt("int_timeout_llu")) + .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")).build(); + } + }; + + private static final RowMapper SERVICE_OVERRIDE_MAPPER = + new RowMapper() { + public ServiceOverride mapRow(ResultSet rs, int rowNum) throws SQLException { + Service data = Service.newBuilder() + .setId(SqlUtil.getString(rs, "pk_show_service")) + .setName(SqlUtil.getString(rs, "str_name")) + .setThreadable(rs.getBoolean("b_threadable")) + .setMinCores(rs.getInt("int_cores_min")) + .setMaxCores(rs.getInt("int_cores_max")) + .setMinMemory(rs.getInt("int_mem_min")) + .setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) + .addAllTags(Lists.newArrayList( + ServiceDaoJdbc.splitTags(SqlUtil.getString(rs, "str_tags")))) + .setTimeout(rs.getInt("int_timeout")) + .setTimeoutLlu(rs.getInt("int_timeout_llu")) + .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")).build(); + return ServiceOverride.newBuilder() + .setId(SqlUtil.getString(rs, "pk_show_service")).setData(data).build(); + } + 
}; + + public static final RowMapper SHOW_MAPPER = new RowMapper() { + public Show mapRow(ResultSet rs, int rowNum) throws SQLException { + ShowStats stats = + ShowStats.newBuilder().setPendingFrames(rs.getInt("int_pending_count")) + .setRunningFrames(rs.getInt("int_running_count")) + .setDeadFrames(rs.getInt("int_dead_count")) + .setCreatedFrameCount(rs.getLong("int_frame_insert_count")) + .setCreatedJobCount(rs.getLong("int_job_insert_count")) + .setRenderedFrameCount(rs.getLong("int_frame_success_count")) + .setFailedFrameCount(rs.getLong("int_frame_fail_count")) + .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")) + .setPendingJobs(rs.getInt("int_job_count")).build(); + return Show.newBuilder().setId(SqlUtil.getString(rs, "pk_show")) + .setName(SqlUtil.getString(rs, "str_name")).setActive(rs.getBoolean("b_active")) + .setDefaultMaxCores( + Convert.coreUnitsToCores(rs.getInt("int_default_max_cores"))) + .setDefaultMinCores( + Convert.coreUnitsToCores(rs.getInt("int_default_min_cores"))) + .setDefaultMaxGpus(rs.getInt("int_default_max_gpus")) + .setDefaultMinGpus(rs.getInt("int_default_min_gpus")) + .setBookingEnabled(rs.getBoolean("b_booking_enabled")) + .setDispatchEnabled(rs.getBoolean("b_dispatch_enabled")) + .setCommentEmail(SqlUtil.getString(rs, "str_comment_email")).setShowStats(stats) + .build(); + } + }; + /* + * Queries + */ + + private static final String GET_JOB_NAMES = + "SELECT " + "job.str_name " + "FROM " + "job," + "show " + "WHERE " + + "job.pk_show = show.pk_show " + "AND " + "job.str_state = 'PENDING' "; + + private static final String GET_HOST_COMMENTS = "SELECT " + "* " + "FROM " + "comments " + + "WHERE " + "pk_host=? " + "ORDER BY " + "ts_created ASC"; + + private static final String GET_FILTER = "SELECT " + "filter.* " + "FROM " + "filter," + "show " + + "WHERE " + "filter.pk_show = show.pk_show"; + + private static final String GET_FRAME = "SELECT " + "frame.pk_frame, " + + "frame.int_exit_status," + "frame.str_name," + "frame.int_number," + + "frame.int_dispatch_order," + "frame.ts_started," + "frame.ts_stopped," + + "frame.ts_llu," + "frame.int_retries," + "frame.str_state," + "frame.str_host," + + "frame.int_cores," + "frame.int_gpus," + "frame.int_mem_max_used," + + "frame.int_mem_used, " + "frame.int_mem_reserved, " + "frame.int_gpu_mem_reserved, " + + "frame.str_checkpoint_state," + "frame.int_checkpoint_count," + + "frame.int_total_past_core_time," + "frame.int_total_past_gpu_time," + + "layer.str_name AS layer_name," + "job.str_name AS job_name," + + "frame_state_display_overrides.* " + "FROM " + "job, " + "layer, " + "frame " + + "LEFT JOIN frame_state_display_overrides ON " + + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job"; + + private static final String FIND_FRAME = + GET_FRAME + " " + "AND " + "job.str_state='PENDING' " + "AND " + "job.str_name=? " + + "AND " + "layer.str_name=? 
" + "AND " + "frame.int_number=?"; + + private static final String GET_PROC = "SELECT " + "host.str_name AS host_name, " + + "job.str_name AS job_name, " + "job.str_log_dir, " + + "folder.str_name as folder_name, " + "show.str_name AS show_name, " + + "frame.str_name AS frame_name, " + "layer.str_services, " + "proc.pk_proc, " + + "proc.pk_host, " + "proc.int_cores_reserved, " + "proc.int_mem_reserved, " + + "proc.int_mem_used, " + "proc.int_mem_max_used, " + "proc.int_gpus_reserved, " + + "proc.int_gpu_mem_reserved, " + "proc.int_gpu_mem_used, " + + "proc.int_gpu_mem_max_used, " + "proc.ts_ping, " + "proc.ts_booked, " + + "proc.ts_dispatched, " + "proc.b_unbooked, " + "proc.bytea_children, " + + "redirect.str_name AS str_redirect " + "FROM proc " + + "JOIN host ON proc.pk_host = host.pk_host " + + "JOIN alloc ON host.pk_alloc = alloc.pk_alloc " + + "JOIN frame ON proc.pk_frame = frame.pk_frame " + + "JOIN layer ON proc.pk_layer = layer.pk_layer " + + "JOIN job ON proc.pk_job = job.pk_job " + + "JOIN folder ON job.pk_folder = folder.pk_folder " + + "JOIN show ON proc.pk_show = show.pk_show " + + "LEFT JOIN redirect ON proc.pk_proc = redirect.pk_proc " + "WHERE true "; + + private static final String GET_JOB_COMMENTS = "SELECT " + "* " + "FROM " + "comments " + + "WHERE " + "pk_job=? " + "ORDER BY " + "ts_created ASC"; + + private static final String GET_UPDATED_FRAME = + "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + "frame.ts_started," + + "frame.ts_stopped," + "frame.int_retries," + "frame.str_state," + + "frame.str_host," + "frame.int_cores," + "frame.int_gpus," + "frame.ts_llu," + + "COALESCE(proc.int_mem_max_used, frame.int_mem_max_used) AS int_mem_max_used," + + "COALESCE(proc.int_mem_used, frame.int_mem_used) AS int_mem_used," + + "frame_state_display_overrides.* " + "FROM " + "job, " + "layer," + "frame " + + "LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + + "LEFT JOIN frame_state_display_overrides ON " + + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job"; + + private static final String GET_ALLOCATION = "SELECT " + "alloc.pk_alloc, " + "alloc.str_name, " + + "alloc.str_tag, " + "alloc.b_billable," + "facility.str_name AS facility_name," + + "vs_alloc_usage.int_cores," + "vs_alloc_usage.int_idle_cores," + + "vs_alloc_usage.int_running_cores," + "vs_alloc_usage.int_available_cores," + + "vs_alloc_usage.int_locked_cores," + "vs_alloc_usage.int_gpus," + + "vs_alloc_usage.int_idle_gpus," + "vs_alloc_usage.int_running_gpus," + + "vs_alloc_usage.int_available_gpus," + "vs_alloc_usage.int_locked_gpus," + + "vs_alloc_usage.int_hosts," + "vs_alloc_usage.int_locked_hosts," + + "vs_alloc_usage.int_down_hosts " + "FROM " + "alloc, " + "facility, " + + "vs_alloc_usage " + "WHERE " + "alloc.pk_alloc = vs_alloc_usage.pk_alloc " + "AND " + + "alloc.pk_facility = facility.pk_facility " + "AND " + "alloc.b_enabled = true"; + + private static final String GET_MATCHER = "SELECT " + "filter.pk_show," + "matcher.* " + "FROM " + + "filter," + "matcher " + "WHERE " + "filter.pk_filter = matcher.pk_filter"; + + private static final String GET_DEPARTMENT = "SELECT " + "dept.str_name AS str_dept," + + "show.str_name || '.' 
|| dept.str_name AS str_name, " + "pk_point," + "str_ti_task," + + "int_cores," + "int_min_cores," + "int_gpus," + "int_min_gpus," + "b_managed " + + "FROM " + "point," + "dept," + "show " + "WHERE " + "point.pk_show = show.pk_show " + + "AND " + "point.pk_dept = dept.pk_dept " + "AND " + "point.pk_show = ? " + "AND " + + "dept.str_name = ?"; + + private static final String GET_DEPARTMENTS = "SELECT " + "dept.str_name AS str_dept," + + "show.str_name || '.' || dept.str_name AS str_name, " + "pk_point," + "str_ti_task," + + "int_cores," + "int_min_cores," + "int_gpus," + "int_min_gpus," + "b_managed " + + "FROM " + "point," + "dept," + "show " + "WHERE " + "point.pk_show = show.pk_show " + + "AND " + "point.pk_dept = dept.pk_dept " + "AND " + "point.pk_show = ? "; + + private static final String QUERY_FOR_OWNER = + "SELECT " + "owner.pk_owner," + "owner.str_username," + "show.str_name AS str_show, " + + "(SELECT COUNT(1) FROM deed WHERE deed.pk_owner = owner.pk_owner) " + + " AS host_count " + "FROM " + "owner, " + "show " + "WHERE " + + "owner.pk_show = show.pk_show"; + + private static final String QUERY_FOR_RENDER_PART = "SELECT " + "host_local.pk_host_local," + + "host_local.int_cores_idle," + "host_local.int_cores_max," + + "host_local.int_gpus_idle," + "host_local.int_gpus_max," + "host_local.int_threads," + + "host_local.int_mem_idle," + "host_local.int_mem_max," + + "host_local.int_gpu_mem_idle," + "host_local.int_gpu_mem_max," + + "host_local.str_type," + + "(SELECT str_name FROM host WHERE host.pk_host = host_local.pk_host) " + + "AS str_host_name," + + "(SELECT str_name FROM job WHERE job.pk_job = host_local.pk_job) " + + "AS str_job_name," + + "(SELECT str_name FROM layer WHERE layer.pk_layer = host_local.pk_layer) " + + "AS str_layer_name," + + "(SELECT str_name FROM frame WHERE frame.pk_frame = host_local.pk_frame) " + + "AS str_frame_name " + "FROM " + "host_local "; + + private static final String QUERY_FOR_FACILITY = + "SELECT " + "facility.pk_facility," + "facility.str_name " + "FROM " + "facility "; + + private static final String QUERY_FOR_LIMIT = "SELECT " + "limit_record.pk_limit_record, " + + "limit_record.str_name, " + "limit_record.int_max_value, " + + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + + "limit_record " + "LEFT JOIN " + + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " + + "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; + + private static final String GET_LIMIT_FROM_LAYER_ID = "SELECT " + + "limit_record.pk_limit_record, " + "limit_record.str_name, " + + "limit_record.int_max_value, " + + "SUM(layer_stat.int_running_count) AS int_current_running " + "FROM " + + "limit_record " + "LEFT JOIN " + + "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + + "LEFT JOIN " + "layer ON layer.pk_layer = layer_limit.pk_layer " + "LEFT JOIN " + + "layer_stat ON layer_stat.pk_layer = layer.pk_layer " + "WHERE " + + "layer_limit.pk_layer = ? 
" + "GROUP BY " + "limit_record.str_name, " + + "limit_record.pk_limit_record, " + "limit_record.int_max_value"; + + public static final String GET_GROUPS = "SELECT " + "show.pk_show, " + + "show.str_name AS str_show," + "dept.str_name AS str_dept," + "folder.pk_folder," + + "folder.pk_parent_folder," + "folder.str_name AS group_name," + + "folder.int_job_priority," + "folder.int_job_min_cores," + "folder.int_job_max_cores," + + "folder_resource.int_min_cores," + "folder_resource.int_max_cores," + + "folder.int_job_min_gpus," + "folder.int_job_max_gpus," + + "folder_resource.int_min_gpus," + "folder_resource.int_max_gpus," + + "folder.b_default, " + "folder_level.int_level, " + "c.int_waiting_count, " + + "c.int_depend_count, " + "c.int_running_count," + "c.int_dead_count," + + "c.int_job_count," + "c.int_cores," + "c.int_gpus " + "FROM " + "folder, " + + "folder_level," + "folder_resource, " + "vs_folder_counts c, " + "show," + "dept " + + "WHERE " + "show.pk_show = folder.pk_show " + "AND " + + "folder.pk_folder = folder_level.pk_folder " + "AND " + + "folder.pk_folder = folder_resource.pk_folder " + "AND " + + "folder.pk_folder = c.pk_folder " + "AND " + "folder.pk_dept = dept.pk_dept "; + + private static final String GET_ACTION = "SELECT " + "filter.pk_show," + "action.* " + "FROM " + + "filter," + "action " + "WHERE " + "filter.pk_filter = action.pk_filter "; + + private static final String GET_JOB = "SELECT " + "job.pk_job," + "job.str_log_dir," + + "job_resource.int_max_cores," + "job_resource.int_min_cores," + + "job_resource.int_max_gpus," + "job_resource.int_min_gpus," + "job.str_name," + + "job.str_shot," + "job.str_state," + "job.int_uid," + "job.str_user," + + "job.b_paused," + "job.ts_started," + "job.ts_stopped," + "job.b_comment," + + "job.b_autoeat," + "job.str_os," + "job_resource.int_priority," + + "job.int_frame_count, " + "job.int_layer_count, " + "show.str_name as str_show," + + "show.pk_show as id_show," + "facility.str_name AS facility_name," + + "folder.str_name AS group_name," + "job_stat.int_waiting_count, " + + "job_stat.int_running_count, " + "job_stat.int_dead_count, " + + "job_stat.int_eaten_count," + "job_stat.int_depend_count, " + + "job_stat.int_succeeded_count, " + "job_usage.int_core_time_success, " + + "job_usage.int_core_time_fail, " + "job_usage.int_gpu_time_success, " + + "job_usage.int_gpu_time_fail, " + "job_usage.int_frame_success_count, " + + "job_usage.int_frame_fail_count, " + "job_usage.int_clock_time_high," + + "job_usage.int_clock_time_success," + "job_mem.int_max_rss," + + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores," + + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus " + "FROM " + + "job," + "folder," + "show," + "facility," + "job_stat," + "job_resource, " + + "job_mem, " + "job_usage " + "WHERE " + "job.pk_show = show.pk_show " + "AND " + + "job.pk_folder = folder.pk_folder " + "AND " + + "job.pk_facility = facility.pk_facility " + "AND " + "job.pk_job = job_stat.pk_job " + + "AND " + "job.pk_job = job_resource.pk_job " + "AND " + "job.pk_job = job_mem.pk_job " + + "AND " + "job.pk_job = job_usage.pk_job "; + + private static final String GET_LAYER = "SELECT " + "layer.*," + "layer_stat.int_total_count," + + "layer_stat.int_waiting_count," + "layer_stat.int_running_count," + + "layer_stat.int_dead_count," + "layer_stat.int_depend_count," + + "layer_stat.int_eaten_count," + "layer_stat.int_succeeded_count," + + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail, " + + 
"layer_usage.int_gpu_time_success," + "layer_usage.int_gpu_time_fail, " + + "layer_usage.int_frame_success_count, " + "layer_usage.int_frame_fail_count, " + + "layer_usage.int_clock_time_low, " + "layer_usage.int_clock_time_high," + + "layer_usage.int_clock_time_success," + "layer_usage.int_clock_time_fail," + + "layer_mem.int_max_rss," + "layer_resource.int_cores," + "layer_resource.int_gpus " + + "FROM " + "layer, " + "job," + "layer_stat, " + "layer_resource, " + "layer_usage, " + + "layer_mem " + "WHERE " + "layer.pk_job = job.pk_job " + "AND " + + "layer.pk_layer = layer_stat.pk_layer " + "AND " + + "layer.pk_layer = layer_resource.pk_layer " + "AND " + + "layer.pk_layer = layer_usage.pk_layer " + "AND " + + "layer.pk_layer = layer_mem.pk_layer"; + + private static final String GET_LAYER_WITH_LIMITS = "SELECT " + "layer.*, " + + "layer_stat.int_total_count, " + "layer_stat.int_waiting_count, " + + "layer_stat.int_running_count, " + "layer_stat.int_dead_count, " + + "layer_stat.int_depend_count, " + "layer_stat.int_eaten_count, " + + "layer_stat.int_succeeded_count, " + "layer_usage.int_core_time_success, " + + "layer_usage.int_core_time_fail, " + "layer_usage.int_gpu_time_success, " + + "layer_usage.int_gpu_time_fail, " + "layer_usage.int_frame_success_count, " + + "layer_usage.int_frame_fail_count, " + "layer_usage.int_clock_time_low, " + + "layer_usage.int_clock_time_high, " + "layer_usage.int_clock_time_success, " + + "layer_usage.int_clock_time_fail, " + "layer_mem.int_max_rss, " + + "layer_resource.int_cores, " + "layer_resource.int_gpus, " + + "limit_names.str_limit_names " + "FROM " + "layer " + "JOIN " + + "job ON layer.pk_job = job.pk_job " + "JOIN " + + "layer_stat ON layer.pk_layer = layer_stat.pk_layer " + "JOIN " + + "layer_resource ON layer.pk_layer = layer_resource.pk_layer " + "JOIN " + + "layer_usage ON layer.pk_layer = layer_usage.pk_layer " + "JOIN " + + "layer_mem ON layer.pk_layer = layer_mem.pk_layer " + "LEFT JOIN " + "(" + "SELECT " + + "layer_limit.pk_layer, " + + "string_agg(limit_record.str_name, ',') AS str_limit_names " + "FROM " + + "limit_record, " + "layer_limit " + "WHERE " + + "layer_limit.pk_limit_record = limit_record.pk_limit_record " + "GROUP BY " + + "layer_limit.pk_layer) AS limit_names " + "ON layer.pk_layer = limit_names.pk_layer "; + + private static final String GET_LIMIT_NAMES = "SELECT " + "limit_record.str_name " + "FROM " + + "layer_limit, " + "limit_record " + "WHERE " + "layer_limit.pk_layer = ? 
" + "AND " + + "limit_record.pk_limit_record = layer_limit.pk_limit_record "; + + private static final String GET_SHOW = "SELECT " + "show.pk_show," + "show.str_name," + + "show.b_paused," + "show.int_default_min_cores," + "show.int_default_max_cores," + + "show.int_default_min_gpus," + "show.int_default_max_gpus," + + "show.b_booking_enabled," + "show.b_dispatch_enabled," + "show.b_active," + + "show.str_comment_email," + "show_stats.int_frame_insert_count," + + "show_stats.int_job_insert_count," + "show_stats.int_frame_success_count," + + "show_stats.int_frame_fail_count," + + "COALESCE(vs_show_stat.int_pending_count,0) AS int_pending_count," + + "COALESCE(vs_show_stat.int_running_count,0) AS int_running_count," + + "COALESCE(vs_show_stat.int_dead_count,0) AS int_dead_count," + + "COALESCE(vs_show_resource.int_cores,0) AS int_cores, " + + "COALESCE(vs_show_resource.int_gpus,0) AS int_gpus, " + + "COALESCE(vs_show_stat.int_job_count,0) AS int_job_count " + "FROM " + "show " + + "JOIN show_stats ON (show.pk_show = show_stats.pk_show) " + + "LEFT JOIN vs_show_stat ON (vs_show_stat.pk_show = show.pk_show) " + + "LEFT JOIN vs_show_resource ON (vs_show_resource.pk_show=show.pk_show) " + "WHERE " + + "1 = 1 "; + + private static final String GET_SERVICE = + "SELECT " + "service.pk_service," + "service.str_name," + "service.b_threadable," + + "service.int_cores_min," + "service.int_cores_max," + "service.int_mem_min," + + "service.int_gpus_min," + "service.int_gpus_max," + "service.int_gpu_mem_min," + + "service.str_tags," + "service.int_timeout," + "service.int_timeout_llu," + + "service.int_min_memory_increase " + "FROM " + "service "; + + private static final String GET_SERVICE_OVERRIDE = "SELECT " + "show_service.pk_show_service," + + "show_service.str_name," + "show_service.b_threadable," + + "show_service.int_cores_min," + "show_service.int_cores_max," + + "show_service.int_mem_min," + "show_service.int_gpus_min," + + "show_service.int_gpus_max," + "show_service.int_gpu_mem_min," + + "show_service.str_tags," + "show_service.int_timeout," + + "show_service.int_timeout_llu," + "show_service.int_min_memory_increase " + "FROM " + + "show_service, " + "show " + "WHERE " + "show_service.pk_show = show.pk_show "; + + private static final String GET_TASK = "SELECT " + "task.pk_task," + "task.str_shot," + + "task.int_min_cores + task.int_adjust_cores AS int_min_cores, " + + "task.int_adjust_cores, " + + "task.int_min_gpus + task.int_adjust_gpus AS int_min_gpus, " + + "task.int_adjust_gpus, " + "dept.str_name AS str_dept " + "FROM " + "task," + "dept, " + + "point " + "WHERE " + "task.pk_point = point.pk_point " + "AND " + + "point.pk_dept = dept.pk_dept "; + + private static final String GET_HOST = "SELECT " + "host.pk_host, " + + "host.str_name AS host_name," + "host_stat.str_state AS host_state," + "host.b_nimby," + + "host_stat.ts_booted," + "host_stat.ts_ping," + "host.int_cores," + + "host.int_cores_idle," + "host.int_mem," + "host.int_mem_idle," + "host.int_gpus," + + "host.int_gpus_idle," + "host.int_gpu_mem," + "host.int_gpu_mem_idle," + + "host.str_tags," + "host.str_lock_state," + "host.b_comment," + + "host.int_thread_mode," + "host_stat.str_os," + "host_stat.int_mem_total," + + "host_stat.int_mem_free," + "host_stat.int_swap_total," + "host_stat.int_swap_free," + + "host_stat.int_mcp_total," + "host_stat.int_mcp_free," + + "host_stat.int_gpu_mem_total," + "host_stat.int_gpu_mem_free," + + "host_stat.int_load, " + "alloc.str_name AS alloc_name " + "FROM " + "alloc," + + "facility, " + 
"host_stat," + "host " + "WHERE " + "host.pk_alloc = alloc.pk_alloc " + + "AND " + "facility.pk_facility = alloc.pk_facility " + "AND " + + "host.pk_host = host_stat.pk_host "; + + private static final String GET_DEPEND = "SELECT " + "depend.pk_depend, " + "depend.str_type, " + + "depend.b_active, " + "depend.b_any, " + "depend.str_target, " + + "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_on) AS depend_on_job, " + + "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_er) AS depend_er_job, " + + "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_on) AS depend_on_layer, " + + "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_er) AS depend_er_layer, " + + "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_on) AS depend_on_frame, " + + "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_er) AS depend_er_frame " + + "FROM " + "depend "; + + private static final String GET_SUBSCRIPTION = "SELECT " + "subscription.pk_subscription, " + + "(alloc.str_name || '.' || show.str_name) AS name, " + "subscription.int_burst, " + + "subscription.int_size, " + "subscription.int_cores, " + "subscription.int_gpus, " + + "show.str_name AS show_name, " + "alloc.str_name AS alloc_name, " + + "facility.str_name AS facility_name " + "FROM " + "show, " + "alloc, " + "facility," + + "subscription " + "WHERE " + "subscription.pk_show = show.pk_show " + "AND " + + "subscription.pk_alloc = alloc.pk_alloc " + "AND " + + "alloc.pk_facility = facility.pk_facility "; + + private static final String GET_PENDING_JOBS = GET_JOB + "AND " + "job.str_state = 'PENDING' "; + + private static final String GET_FRAMES_CRITERIA = + + "SELECT " + "frame.pk_frame, " + "frame.int_exit_status," + "frame.str_name," + + "frame.int_number," + "frame.int_dispatch_order," + "frame.ts_started," + + "frame.ts_stopped," + "frame.ts_llu," + "frame.int_retries," + + "frame.str_state," + "frame.str_host," + "frame.int_cores," + + "frame.int_mem_max_used," + "frame.int_mem_used, " + + "frame.int_mem_reserved, " + "frame.int_gpus," + + "frame.int_gpu_mem_max_used, " + "frame.int_gpu_mem_used, " + + "frame.int_gpu_mem_reserved, " + "frame.str_checkpoint_state," + + "frame.int_checkpoint_count," + "frame.int_total_past_core_time," + + "frame.int_total_past_gpu_time," + "layer.str_name AS layer_name," + + "job.str_name AS job_name, " + "frame_state_display_overrides.*, " + + "ROW_NUMBER() OVER " + + "(ORDER BY frame.int_dispatch_order ASC, layer.int_dispatch_order ASC) AS row_number " + + "FROM " + "job, " + "layer," + "frame " + + "LEFT JOIN frame_state_display_overrides ON " + + "(frame.pk_frame = frame_state_display_overrides.pk_frame AND " + + "frame.str_state = frame_state_display_overrides.str_frame_state) " + "WHERE " + + "frame.pk_layer = layer.pk_layer " + "AND " + "frame.pk_job= job.pk_job "; + + private static final String QUERY_FOR_DEED = "SELECT " + "host.str_name AS str_host," + + "show.str_name AS str_show," + "owner.str_username," + "deed.pk_deed " + "FROM " + + "deed," + "owner," + "host," + "show " + "WHERE " + "deed.pk_host = host.pk_host " + + "AND " + "deed.pk_owner = owner.pk_owner " + "AND " + "owner.pk_show = show.pk_show "; + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } + + public ProcSearchFactory getProcSearchFactory() { + return 
procSearchFactory; + } + + public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { + this.procSearchFactory = procSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java b/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java index 1da4e4940..cf876e058 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/AbstractDepend.java @@ -22,59 +22,59 @@ */ public abstract class AbstractDepend { - private boolean launch = false; - private boolean active = true; - private boolean anyFrame = false; + private boolean launch = false; + private boolean active = true; + private boolean anyFrame = false; - /** - * True if the dependency is just a container for other depends and cannot be satisfied by frames - * completing. Its essentially a way to group related depends. - */ - private boolean composite = false; + /** + * True if the dependency is just a container for other depends and cannot be satisfied by + * frames completing. Its essentially a way to group related depends. + */ + private boolean composite = false; - private String id = null; + private String id = null; - public String getId() { - return id; - } + public String getId() { + return id; + } - public boolean isActive() { - return active; - } + public boolean isActive() { + return active; + } - public boolean isAnyFrame() { - return anyFrame; - } + public boolean isAnyFrame() { + return anyFrame; + } - public void setAnyFrame(boolean anyFrame) { - this.anyFrame = anyFrame; - } + public void setAnyFrame(boolean anyFrame) { + this.anyFrame = anyFrame; + } - public void setActive(boolean active) { - this.active = active; - } + public void setActive(boolean active) { + this.active = active; + } - public void setId(String id) { - this.id = id; - } + public void setId(String id) { + this.id = id; + } - public boolean isLaunchDepend() { - return launch; - } + public boolean isLaunchDepend() { + return launch; + } - public void setLaunchDepend(boolean launch) { - this.launch = launch; - } + public void setLaunchDepend(boolean launch) { + this.launch = launch; + } - public boolean isComposite() { - return composite; - } + public boolean isComposite() { + return composite; + } - public void setComposite(boolean composite) { - this.composite = composite; - } + public void setComposite(boolean composite) { + this.composite = composite; + } - public abstract String getSignature(); + public abstract String getSignature(); - public abstract DependTarget getTarget(); + public abstract DependTarget getTarget(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java b/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java index aaa907d11..364386edf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/Depend.java @@ -20,6 +20,6 @@ */ public interface Depend { - void accept(DependVisitor dependVisitor); + void accept(DependVisitor dependVisitor); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java b/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java index 66f0473b8..1d06a5e2d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/DependCreationVisitor.java @@ -19,69 +19,69 @@ public class DependCreationVisitor implements DependVisitor { - DependManager 
dependManager; - - public DependCreationVisitor(DependManager dependManager) { - this.dependManager = dependManager; - } - - @Override - public void accept(FrameOnFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(JobOnJob depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(JobOnLayer depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(JobOnFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnJob depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnLayer depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(FrameOnJob depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(FrameOnLayer depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(FrameByFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(PreviousFrame depend) { - dependManager.createDepend(depend); - } - - @Override - public void accept(LayerOnSimFrame depend) { - dependManager.createDepend(depend); - } + DependManager dependManager; + + public DependCreationVisitor(DependManager dependManager) { + this.dependManager = dependManager; + } + + @Override + public void accept(FrameOnFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(JobOnJob depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(JobOnLayer depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(JobOnFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnJob depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnLayer depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(FrameOnJob depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(FrameOnLayer depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(FrameByFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(PreviousFrame depend) { + dependManager.createDepend(depend); + } + + @Override + public void accept(LayerOnSimFrame depend) { + dependManager.createDepend(depend); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java b/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java index a0956997b..b0130a308 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/DependException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class DependException extends SpcueRuntimeException { - public DependException() { - // TODO Auto-generated constructor stub - } - - public DependException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public DependException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public DependException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public DependException() { + // 
TODO Auto-generated constructor stub + } + + public DependException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public DependException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public DependException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java b/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java index 40928bea2..12466d764 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/DependVisitor.java @@ -17,27 +17,27 @@ public interface DependVisitor { - void accept(JobOnJob depend); + void accept(JobOnJob depend); - void accept(JobOnLayer depend); + void accept(JobOnLayer depend); - void accept(JobOnFrame depend); + void accept(JobOnFrame depend); - void accept(LayerOnJob depend); + void accept(LayerOnJob depend); - void accept(LayerOnLayer depend); + void accept(LayerOnLayer depend); - void accept(LayerOnFrame depend); + void accept(LayerOnFrame depend); - void accept(FrameOnJob depend); + void accept(FrameOnJob depend); - void accept(FrameOnLayer depend); + void accept(FrameOnLayer depend); - void accept(FrameOnFrame depend); + void accept(FrameOnFrame depend); - void accept(FrameByFrame depend); + void accept(FrameByFrame depend); - void accept(PreviousFrame depend); + void accept(PreviousFrame depend); - void accept(LayerOnSimFrame depend); + void accept(LayerOnSimFrame depend); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java index 5d7c2cb57..ffdbbbc9e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameByFrame.java @@ -22,51 +22,51 @@ public class FrameByFrame extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final LayerInterface dependOnLayer; + private final LayerInterface dependErLayer; + private final LayerInterface dependOnLayer; - public FrameByFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { + public FrameByFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - if (dependErLayer.getLayerId().equals(dependOnLayer.getLayerId())) { - throw new DependException( - "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); - } + if (dependErLayer.getLayerId().equals(dependOnLayer.getLayerId())) { + throw new DependException( + "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); + } - this.dependErLayer = dependErLayer; - this.dependOnLayer = dependOnLayer; - setComposite(true); - } + this.dependErLayer = dependErLayer; + this.dependOnLayer = dependOnLayer; + setComposite(true); + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnLayer.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnLayer.getJobId()); + 
key.append(dependErLayer.getLayerId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java index b64511c13..37be03cd5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnFrame.java @@ -22,62 +22,62 @@ public class FrameOnFrame extends AbstractDepend implements Depend { - private final FrameInterface dependErFrame; - private final FrameInterface dependOnFrame; - private AbstractDepend parent = null; + private final FrameInterface dependErFrame; + private final FrameInterface dependOnFrame; + private AbstractDepend parent = null; - public FrameOnFrame(FrameInterface dependErFrame, FrameInterface dependOnFrame, - AbstractDepend parent) { + public FrameOnFrame(FrameInterface dependErFrame, FrameInterface dependOnFrame, + AbstractDepend parent) { - if (dependOnFrame.getFrameId().equals(dependErFrame.getFrameId())) { - throw new DependException( - "The frame " + dependErFrame.getName() + " cannot depend on itself."); - } + if (dependOnFrame.getFrameId().equals(dependErFrame.getFrameId())) { + throw new DependException( + "The frame " + dependErFrame.getName() + " cannot depend on itself."); + } - this.dependErFrame = dependErFrame; - this.dependOnFrame = dependOnFrame; - this.parent = parent; - } + this.dependErFrame = dependErFrame; + this.dependOnFrame = dependOnFrame; + this.parent = parent; + } - public FrameOnFrame(FrameInterface dependErFrame, FrameInterface dependOnFrame) { - this.dependErFrame = dependErFrame; - this.dependOnFrame = dependOnFrame; - } + public FrameOnFrame(FrameInterface dependErFrame, FrameInterface dependOnFrame) { + this.dependErFrame = dependErFrame; + this.dependOnFrame = dependOnFrame; + } - public FrameInterface getDependErFrame() { - return dependErFrame; - } + public FrameInterface getDependErFrame() { + return dependErFrame; + } - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } - public AbstractDepend getParent() { - return parent; - } + public AbstractDepend getParent() { + return parent; + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_ON_FRAME.toString()); - 
key.append(dependErFrame.getJobId()); - key.append(dependOnFrame.getJobId()); - key.append(dependErFrame.getFrameId()); - key.append(dependOnFrame.getFrameId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_ON_FRAME.toString()); + key.append(dependErFrame.getJobId()); + key.append(dependOnFrame.getJobId()); + key.append(dependErFrame.getFrameId()); + key.append(dependOnFrame.getFrameId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public DependTarget getTarget() { - if (dependErFrame.getJobId().equals(dependOnFrame.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErFrame.getJobId().equals(dependOnFrame.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java index 03866e182..cd4556c6a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnJob.java @@ -23,43 +23,43 @@ public class FrameOnJob extends AbstractDepend implements Depend { - private final FrameInterface dependErFrame; - private final JobInterface dependOnJob; + private final FrameInterface dependErFrame; + private final JobInterface dependOnJob; - public FrameOnJob(FrameInterface dependErFrame, JobInterface dependOnJob) { + public FrameOnJob(FrameInterface dependErFrame, JobInterface dependOnJob) { - if (dependErFrame.getJobId().equals(dependOnJob.getJobId())) { - throw new DependException("A frame cannot depend on its own job."); - } + if (dependErFrame.getJobId().equals(dependOnJob.getJobId())) { + throw new DependException("A frame cannot depend on its own job."); + } - this.dependErFrame = dependErFrame; - this.dependOnJob = dependOnJob; - } + this.dependErFrame = dependErFrame; + this.dependOnJob = dependOnJob; + } - public FrameInterface getDependErFrame() { - return dependErFrame; - } + public FrameInterface getDependErFrame() { + return dependErFrame; + } - public JobInterface getDependOnJob() { - return dependOnJob; - } + public JobInterface getDependOnJob() { + return dependOnJob; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErFrame.getFrameId()); - key.append(dependOnJob.getJobId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErFrame.getFrameId()); + key.append(dependOnJob.getJobId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java index a8b001d57..80ed24744 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/FrameOnLayer.java @@ -23,48 +23,48 @@ public class FrameOnLayer extends AbstractDepend implements Depend { - private final FrameInterface dependErFrame; - private final LayerInterface dependOnLayer; + private final FrameInterface dependErFrame; + private final LayerInterface dependOnLayer; - public FrameOnLayer(FrameInterface dependErFrame, LayerInterface dependOnLayer) { + public FrameOnLayer(FrameInterface dependErFrame, LayerInterface dependOnLayer) { - if (dependErFrame.getLayerId().equals(dependOnLayer.getLayerId())) { - throw new DependException( - "The frame " + dependErFrame.getName() + " cannot depend " + " on its own layer."); - } + if (dependErFrame.getLayerId().equals(dependOnLayer.getLayerId())) { + throw new DependException("The frame " + dependErFrame.getName() + " cannot depend " + + " on its own layer."); + } - this.dependErFrame = dependErFrame; - this.dependOnLayer = dependOnLayer; - } + this.dependErFrame = dependErFrame; + this.dependOnLayer = dependOnLayer; + } - public FrameInterface getDependErFrame() { - return dependErFrame; - } + public FrameInterface getDependErFrame() { + return dependErFrame; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErFrame.getFrameId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErFrame.getFrameId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - if (dependErFrame.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErFrame.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java index 464428842..08d955036 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnFrame.java @@ -23,43 +23,43 @@ public class JobOnFrame extends AbstractDepend implements Depend { - private final JobInterface dependErJob; - private final FrameInterface dependOnFrame; + private final JobInterface dependErJob; + private final FrameInterface dependOnFrame; - public JobOnFrame(JobInterface dependErJob, FrameInterface dependOnFrame) { + public JobOnFrame(JobInterface dependErJob, FrameInterface dependOnFrame) { - if (dependErJob.getJobId().equals(dependOnFrame.getJobId())) { - throw new DependException("A job cannot depend on one of its own frames."); - } + if (dependErJob.getJobId().equals(dependOnFrame.getJobId())) { + throw new DependException("A job cannot 
depend on one of its own frames."); + } - this.dependErJob = dependErJob; - this.dependOnFrame = dependOnFrame; - } + this.dependErJob = dependErJob; + this.dependOnFrame = dependOnFrame; + } - public JobInterface getDependErJob() { - return dependErJob; - } + public JobInterface getDependErJob() { + return dependErJob; + } - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErJob.getJobId()); - key.append(dependOnFrame.getFrameId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErJob.getJobId()); + key.append(dependOnFrame.getFrameId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java index bd1922283..8cc67a61a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnJob.java @@ -22,43 +22,43 @@ public class JobOnJob extends AbstractDepend implements Depend { - private final JobInterface dependErJob; - private final JobInterface dependOnJob; + private final JobInterface dependErJob; + private final JobInterface dependOnJob; - public JobOnJob(JobInterface dependErJob, JobInterface dependOnJob) { + public JobOnJob(JobInterface dependErJob, JobInterface dependOnJob) { - if (dependErJob.getJobId().equals(dependOnJob.getJobId())) { - throw new DependException("A job cannot depend on itself."); - } + if (dependErJob.getJobId().equals(dependOnJob.getJobId())) { + throw new DependException("A job cannot depend on itself."); + } - this.dependErJob = dependErJob; - this.dependOnJob = dependOnJob; - } + this.dependErJob = dependErJob; + this.dependOnJob = dependOnJob; + } - public JobInterface getDependErJob() { - return dependErJob; - } + public JobInterface getDependErJob() { + return dependErJob; + } - public JobInterface getDependOnJob() { - return dependOnJob; - } + public JobInterface getDependOnJob() { + return dependOnJob; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.JOB_ON_JOB.toString()); - key.append(dependErJob.getJobId()); - key.append(dependOnJob.getJobId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.JOB_ON_JOB.toString()); + key.append(dependErJob.getJobId()); + key.append(dependOnJob.getJobId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependCreator) { - dependCreator.accept(this); - } + @Override + public void accept(DependVisitor dependCreator) { + dependCreator.accept(this); + } - @Override - public DependTarget getTarget() { - return 
DependTarget.EXTERNAL; - } + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java index 588f90aca..2e0bbb3fa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/JobOnLayer.java @@ -23,43 +23,43 @@ public class JobOnLayer extends AbstractDepend implements Depend { - private final JobInterface dependErJob; - private final LayerInterface dependOnLayer; + private final JobInterface dependErJob; + private final LayerInterface dependOnLayer; - public JobOnLayer(JobInterface dependErJob, LayerInterface dependOnLayer) { + public JobOnLayer(JobInterface dependErJob, LayerInterface dependOnLayer) { - if (dependErJob.getJobId().equals(dependOnLayer.getJobId())) { - throw new DependException("A job cannot depend on one of its own layers."); - } + if (dependErJob.getJobId().equals(dependOnLayer.getJobId())) { + throw new DependException("A job cannot depend on one of its own layers."); + } - this.dependErJob = dependErJob; - this.dependOnLayer = dependOnLayer; - } + this.dependErJob = dependErJob; + this.dependOnLayer = dependOnLayer; + } - public JobInterface getDependErJob() { - return dependErJob; - } + public JobInterface getDependErJob() { + return dependErJob; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.JOB_ON_JOB.toString()); - key.append(dependErJob.getJobId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.JOB_ON_JOB.toString()); + key.append(dependErJob.getJobId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java index b98de5618..0444f90c1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnFrame.java @@ -23,47 +23,47 @@ public class LayerOnFrame extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final FrameInterface dependOnFrame; + private final LayerInterface dependErLayer; + private final FrameInterface dependOnFrame; - public LayerOnFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { + public LayerOnFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { - if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { - throw new DependException("A layer cannot depend on one of its own frames."); - } + if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { + throw new DependException("A layer cannot depend on one of its own 
frames."); + } - this.dependErLayer = dependErLayer; - this.dependOnFrame = dependOnFrame; - } + this.dependErLayer = dependErLayer; + this.dependOnFrame = dependOnFrame; + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.JOB_ON_JOB.toString()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnFrame.getFrameId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.JOB_ON_JOB.toString()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnFrame.getFrameId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java index 721aed4f4..08348a6e8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnJob.java @@ -23,43 +23,43 @@ public class LayerOnJob extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final JobInterface dependOnJob; + private final LayerInterface dependErLayer; + private final JobInterface dependOnJob; - public LayerOnJob(LayerInterface dependErLayer, JobInterface dependOnJob) { + public LayerOnJob(LayerInterface dependErLayer, JobInterface dependOnJob) { - if (dependErLayer.getJobId().equals(dependOnJob.getJobId())) { - throw new DependException("A layer cannot depend on its own job."); - } + if (dependErLayer.getJobId().equals(dependOnJob.getJobId())) { + throw new DependException("A layer cannot depend on its own job."); + } - this.dependErLayer = dependErLayer; - this.dependOnJob = dependOnJob; - } + this.dependErLayer = dependErLayer; + this.dependOnJob = dependOnJob; + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public JobInterface getDependOnJob() { - return dependOnJob; - } + public JobInterface getDependOnJob() { + return dependOnJob; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.FRAME_BY_FRAME.toString()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnJob.getJobId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.FRAME_BY_FRAME.toString()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnJob.getJobId()); + 
return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - return DependTarget.EXTERNAL; - } + @Override + public DependTarget getTarget() { + return DependTarget.EXTERNAL; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java index d4a61a5e8..c49eab75c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnLayer.java @@ -22,50 +22,50 @@ public class LayerOnLayer extends AbstractDepend implements Depend { - public final LayerInterface dependErLayer; - public final LayerInterface dependOnLayer; + public final LayerInterface dependErLayer; + public final LayerInterface dependOnLayer; - public LayerOnLayer(LayerInterface dependErLayer, LayerInterface dependOnLayer) { + public LayerOnLayer(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - if (dependErLayer.getLayerId().equals(dependOnLayer.getLayerId())) { - throw new DependException( - "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); - } + if (dependErLayer.getLayerId().equals(dependOnLayer.getLayerId())) { + throw new DependException( + "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); + } - this.dependErLayer = dependErLayer; - this.dependOnLayer = dependOnLayer; - } + this.dependErLayer = dependErLayer; + this.dependOnLayer = dependOnLayer; + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.LAYER_ON_LAYER.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnLayer.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.LAYER_ON_LAYER.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnLayer.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java index 8af36ba95..29abfe6d9 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/LayerOnSimFrame.java @@ -23,51 +23,51 @@ public class LayerOnSimFrame extends AbstractDepend implements Depend { - private final LayerInterface dependErLayer; - private final FrameInterface dependOnFrame; + private final LayerInterface dependErLayer; + private final FrameInterface dependOnFrame; - public LayerOnSimFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { + public LayerOnSimFrame(LayerInterface dependErLayer, FrameInterface dependOnFrame) { - if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { - throw new DependException( - "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); - } + if (dependErLayer.getLayerId().equals(dependOnFrame.getLayerId())) { + throw new DependException( + "Cannot make the layer " + dependErLayer.getName() + " depend on itself."); + } - this.dependErLayer = dependErLayer; - this.dependOnFrame = dependOnFrame; - setComposite(true); - } + this.dependErLayer = dependErLayer; + this.dependOnFrame = dependOnFrame; + setComposite(true); + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public FrameInterface getDependOnFrame() { - return dependOnFrame; - } + public FrameInterface getDependOnFrame() { + return dependOnFrame; + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.LAYER_ON_SIM_FRAME.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnFrame.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnFrame.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.LAYER_ON_SIM_FRAME.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnFrame.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnFrame.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnFrame.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java b/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java index c58a824b0..560998ad0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/PreviousFrame.java @@ -22,45 +22,45 @@ public class PreviousFrame extends AbstractDepend implements Depend { - public final LayerInterface dependErLayer; - public final LayerInterface dependOnLayer; + public final LayerInterface dependErLayer; + public final LayerInterface dependOnLayer; - public PreviousFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { - this.dependErLayer = dependErLayer; - this.dependOnLayer = dependOnLayer; - 
setComposite(true); - } + public PreviousFrame(LayerInterface dependErLayer, LayerInterface dependOnLayer) { + this.dependErLayer = dependErLayer; + this.dependOnLayer = dependOnLayer; + setComposite(true); + } - @Override - public String getSignature() { - StringBuilder key = new StringBuilder(256); - key.append(DependType.PREVIOUS_FRAME.toString()); - key.append(dependErLayer.getJobId()); - key.append(dependOnLayer.getJobId()); - key.append(dependErLayer.getLayerId()); - key.append(dependOnLayer.getLayerId()); - return SqlUtil.genKeyByName(key.toString()); - } + @Override + public String getSignature() { + StringBuilder key = new StringBuilder(256); + key.append(DependType.PREVIOUS_FRAME.toString()); + key.append(dependErLayer.getJobId()); + key.append(dependOnLayer.getJobId()); + key.append(dependErLayer.getLayerId()); + key.append(dependOnLayer.getLayerId()); + return SqlUtil.genKeyByName(key.toString()); + } - @Override - public DependTarget getTarget() { - if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { - return DependTarget.INTERNAL; - } else { - return DependTarget.EXTERNAL; + @Override + public DependTarget getTarget() { + if (dependErLayer.getJobId().equals(dependOnLayer.getJobId())) { + return DependTarget.INTERNAL; + } else { + return DependTarget.EXTERNAL; + } } - } - @Override - public void accept(DependVisitor dependVisitor) { - dependVisitor.accept(this); - } + @Override + public void accept(DependVisitor dependVisitor) { + dependVisitor.accept(this); + } - public LayerInterface getDependErLayer() { - return dependErLayer; - } + public LayerInterface getDependErLayer() { + return dependErLayer; + } - public LayerInterface getDependOnLayer() { - return dependOnLayer; - } + public LayerInterface getDependOnLayer() { + return dependOnLayer; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java b/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java index 4bc5d7fa3..a1c7de40d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java +++ b/cuebot/src/main/java/com/imageworks/spcue/depend/QueueDependOperation.java @@ -17,16 +17,16 @@ public class QueueDependOperation implements Runnable { - private DependVisitor visitor; - private Depend depend; + private DependVisitor visitor; + private Depend depend; - public QueueDependOperation(Depend depend, DependVisitor visitor) { - this.depend = depend; - this.visitor = visitor; - } + public QueueDependOperation(Depend depend, DependVisitor visitor) { + this.depend = depend; + this.visitor = visitor; + } - @Override - public void run() { - depend.accept(visitor); - } + @Override + public void run() { + depend.accept(visitor); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java index d6dce119e..cc6ca942f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java @@ -30,192 +30,195 @@ */ public abstract class AbstractDispatcher { - private static final Logger logger = LogManager.getLogger(AbstractDispatcher.class); - - public DispatchSupport dispatchSupport; - public RqdClient rqdClient; - - public boolean testMode = false; - - public boolean dispatchProc(DispatchFrame frame, VirtualProc proc) { - - try { - dispatch(frame, proc); - dispatchSummary(proc, frame, "Dispatch"); - 
DispatchSupport.dispatchedProcs.getAndIncrement(); - - return true; - - } catch (FrameReservationException fre) { - /* - * This usually just means another thread got the frame first, so just retry on the next - * frame. - */ - DispatchSupport.bookingRetries.incrementAndGet(); - String msg = - "frame reservation error, " + "dispatchProcToJob failed to book next frame, " + fre; - logger.info(msg); - } catch (ResourceDuplicationFailureException rrfe) { - /* - * There is a resource already assigned to the frame we reserved! Don't clear the frame, let - * it keep running and continue to the next frame. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.fixFrame(frame); - - String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + frame - + ", " + proc + " already assigned to another frame." + rrfe; - - logger.info(msg); - } catch (ResourceReservationFailureException rrfe) { - /* - * This should technically never happen since the proc is already allocated at this point, - * but, if it does it should be unbooked. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = "proc update error, " + "dispatchProcToJob failed to assign proc to job " + frame - + ", " + rrfe; - logger.info(msg); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - throw new DispatcherException("proc reservation error, " + "unable to allocate proc " + proc - + "that " + "was already allocated."); - } catch (Exception e) { - /* - * Everything else means that the host/frame record was updated but another error occurred and - * the proc should be cleared. It could also be running, so use the jobManagerSupprot to kill - * it just in case. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = "dispatchProcToJob failed booking proc " + proc + " on job " + frame; - logger.info(msg); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - try { - rqdClient.killFrame(proc, "An accounting error occured " + "when booking this frame."); - } catch (RqdClientException rqde) { - /* - * Its almost expected that this will fail, as this is just a precaution if the frame did - * actually launch. - */ - } - throw new DispatcherException( - "proc reservation error, " + "unable to communicate with proc " + proc); + private static final Logger logger = LogManager.getLogger(AbstractDispatcher.class); + + public DispatchSupport dispatchSupport; + public RqdClient rqdClient; + + public boolean testMode = false; + + public boolean dispatchProc(DispatchFrame frame, VirtualProc proc) { + + try { + dispatch(frame, proc); + dispatchSummary(proc, frame, "Dispatch"); + DispatchSupport.dispatchedProcs.getAndIncrement(); + + return true; + + } catch (FrameReservationException fre) { + /* + * This usually just means another thread got the frame first, so just retry on the next + * frame. + */ + DispatchSupport.bookingRetries.incrementAndGet(); + String msg = "frame reservation error, " + + "dispatchProcToJob failed to book next frame, " + fre; + logger.info(msg); + } catch (ResourceDuplicationFailureException rrfe) { + /* + * There is a resource already assigned to the frame we reserved! Don't clear the frame, + * let it keep running and continue to the next frame. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.fixFrame(frame); + + String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + + frame + ", " + proc + " already assigned to another frame." 
+ rrfe; + + logger.info(msg); + } catch (ResourceReservationFailureException rrfe) { + /* + * This should technically never happen since the proc is already allocated at this + * point, but, if it does it should be unbooked. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "proc update error, " + "dispatchProcToJob failed to assign proc to job " + + frame + ", " + rrfe; + logger.info(msg); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + throw new DispatcherException("proc reservation error, " + "unable to allocate proc " + + proc + "that " + "was already allocated."); + } catch (Exception e) { + /* + * Everything else means that the host/frame record was updated but another error + * occurred and the proc should be cleared. It could also be running, so use the + * jobManagerSupprot to kill it just in case. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "dispatchProcToJob failed booking proc " + proc + " on job " + frame; + logger.info(msg); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + try { + rqdClient.killFrame(proc, + "An accounting error occured " + "when booking this frame."); + } catch (RqdClientException rqde) { + /* + * Its almost expected that this will fail, as this is just a precaution if the + * frame did actually launch. + */ + } + throw new DispatcherException( + "proc reservation error, " + "unable to communicate with proc " + proc); + } + + return false; } - return false; - } - - public boolean dispatchHost(DispatchFrame frame, VirtualProc proc) { - try { - dispatch(frame, proc); - dispatchSummary(proc, frame, "Booking"); - DispatchSupport.bookedProcs.getAndIncrement(); - DispatchSupport.bookedCores.addAndGet(proc.coresReserved); - DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); - return true; - } catch (FrameReservationException fre) { - /* - * This usually just means another thread got the frame first, so just retry on the next - * frame. - */ - DispatchSupport.bookingRetries.incrementAndGet(); - logger - .info("frame reservation error, " + "dispatchHostToJob failed to book new frame: " + fre); - } catch (ResourceDuplicationFailureException rrfe) { - /* - * There is a resource already assigned to the frame we reserved! Don't clear the frame, let - * it keep running and continue to the next frame. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.fixFrame(frame); - - String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + frame - + ", " + proc + " already assigned to another frame." + rrfe; - - logger.info(msg); - } catch (ResourceReservationFailureException rrfe) { - /* - * This generally means that the resources we're booked by another thread. We can be fairly - * certain another thread is working with the current host, so bail out. Also note here the - * proc was never committed so there is not point to clearing or unbooking it. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.clearFrame(frame); - - /* Throw an exception to stop booking * */ - throw new DispatcherException( - "host reservation error, " + "dispatchHostToJob failed to allocate a new proc " + rrfe); - } catch (Exception e) { - /* - * Any other exception means that the frame/host records have been updated, so, we need to - * clear the proc. Its possible the frame is actually running, so try to kill it. 
- */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - - try { - rqdClient.killFrame(proc, "An accounting error occured " + "when booking this frame."); - } catch (RqdClientException rqde) { - /* - * Its almost expected that this will fail, as this is just a precaution if the frame did - * actually launch. - */ - } - /* Thrown an exception to stop booking */ - throw new DispatcherException("stopped dispatching host " + proc + ", " + e); + public boolean dispatchHost(DispatchFrame frame, VirtualProc proc) { + try { + dispatch(frame, proc); + dispatchSummary(proc, frame, "Booking"); + DispatchSupport.bookedProcs.getAndIncrement(); + DispatchSupport.bookedCores.addAndGet(proc.coresReserved); + DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); + return true; + } catch (FrameReservationException fre) { + /* + * This usually just means another thread got the frame first, so just retry on the next + * frame. + */ + DispatchSupport.bookingRetries.incrementAndGet(); + logger.info("frame reservation error, " + "dispatchHostToJob failed to book new frame: " + + fre); + } catch (ResourceDuplicationFailureException rrfe) { + /* + * There is a resource already assigned to the frame we reserved! Don't clear the frame, + * let it keep running and continue to the next frame. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.fixFrame(frame); + + String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + + frame + ", " + proc + " already assigned to another frame." + rrfe; + + logger.info(msg); + } catch (ResourceReservationFailureException rrfe) { + /* + * This generally means that the resources we're booked by another thread. We can be + * fairly certain another thread is working with the current host, so bail out. Also + * note here the proc was never committed so there is not point to clearing or unbooking + * it. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.clearFrame(frame); + + /* Throw an exception to stop booking * */ + throw new DispatcherException("host reservation error, " + + "dispatchHostToJob failed to allocate a new proc " + rrfe); + } catch (Exception e) { + /* + * Any other exception means that the frame/host records have been updated, so, we need + * to clear the proc. Its possible the frame is actually running, so try to kill it. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + try { + rqdClient.killFrame(proc, + "An accounting error occured " + "when booking this frame."); + } catch (RqdClientException rqde) { + /* + * Its almost expected that this will fail, as this is just a precaution if the + * frame did actually launch. + */ + } + /* Thrown an exception to stop booking */ + throw new DispatcherException("stopped dispatching host " + proc + ", " + e); + } + + return false; } - return false; - } + public void dispatch(DispatchFrame frame, VirtualProc proc) { + /* + * Start frame and create proc on the database. + */ + dispatchSupport.startFrameAndProc(proc, frame); - public void dispatch(DispatchFrame frame, VirtualProc proc) { - /* - * Start frame and create proc on the database. - */ - dispatchSupport.startFrameAndProc(proc, frame); + /* + * Communicate with RQD to run the frame. + */ + if (!testMode) { + dispatchSupport.runFrame(proc, frame); + } - /* - * Communicate with RQD to run the frame. 
- */ - if (!testMode) { - dispatchSupport.runFrame(proc, frame); } - } - - private static void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { - String msg = - type + " summary: " + p.coresReserved + " cores / " + CueUtil.KbToMb(p.memoryReserved) - + " memory / " + p.gpusReserved + " gpus / " + CueUtil.KbToMb(p.gpuMemoryReserved) - + " gpu memory " + p.getName() + " to " + f.show + "/" + f.shot; - logger.info(msg); - } + private static void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { + String msg = type + " summary: " + p.coresReserved + " cores / " + + CueUtil.KbToMb(p.memoryReserved) + " memory / " + p.gpusReserved + " gpus / " + + CueUtil.KbToMb(p.gpuMemoryReserved) + " gpu memory " + p.getName() + " to " + + f.show + "/" + f.shot; + logger.info(msg); + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public RqdClient getRqdClient() { - return rqdClient; - } + public RqdClient getRqdClient() { + return rqdClient; + } - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } - public boolean isTestMode() { - return testMode; - } + public boolean isTestMode() { + return testMode; + } - public void setTestMode(boolean testMode) { - this.testMode = testMode; - } + public void setTestMode(boolean testMode) { + this.testMode = testMode; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java index 17eae95ab..843c07266 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java @@ -26,88 +26,89 @@ public class BookingQueue implements QueueHealthCheck { - private final int healthThreshold; - private final int minUnhealthyPeriodMin; - private final int queueCapacity; - private final int corePoolSize; - private final int maxPoolSize; - // Base value for calculating the job sleep time - // this is used to slow down the booking queue to avoid racing conditions - private static final int BASE_SLEEP_TIME_MILLIS = 300; - - private static final Logger logger = LogManager.getLogger("HEALTH"); - private HealthyThreadPool healthyThreadPool; - - public BookingQueue(int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, - int corePoolSize, int maxPoolSize) { - this.healthThreshold = healthThreshold; - this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; - this.queueCapacity = queueCapacity; - this.corePoolSize = corePoolSize; - this.maxPoolSize = maxPoolSize; - initThreadPool(); - } - - public void initThreadPool() { - healthyThreadPool = new HealthyThreadPool("BookingQueue", healthThreshold, - minUnhealthyPeriodMin, queueCapacity, corePoolSize, maxPoolSize, BASE_SLEEP_TIME_MILLIS); - } - - public void shutdownUnhealthy() { - try { - if (!healthyThreadPool.shutdownUnhealthy()) { - logger.warn("BookingQueue: Unhealthy queue terminated, starting a new one"); + private final int healthThreshold; + private final int minUnhealthyPeriodMin; + private final int queueCapacity; + private final int corePoolSize; + private final 
int maxPoolSize; + // Base value for calculating the job sleep time + // this is used to slow down the booking queue to avoid racing conditions + private static final int BASE_SLEEP_TIME_MILLIS = 300; + + private static final Logger logger = LogManager.getLogger("HEALTH"); + private HealthyThreadPool healthyThreadPool; + + public BookingQueue(int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, + int corePoolSize, int maxPoolSize) { + this.healthThreshold = healthThreshold; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.queueCapacity = queueCapacity; + this.corePoolSize = corePoolSize; + this.maxPoolSize = maxPoolSize; initThreadPool(); - } - } catch (InterruptedException e) { - // TODO: evaluate crashing the whole springbook context here - // to force a container restart cycle - logger.error("Failed to restart BookingThreadPool", e); } - } - public boolean isHealthy() { - return healthyThreadPool.healthCheck(); - } + public void initThreadPool() { + healthyThreadPool = + new HealthyThreadPool("BookingQueue", healthThreshold, minUnhealthyPeriodMin, + queueCapacity, corePoolSize, maxPoolSize, BASE_SLEEP_TIME_MILLIS); + } - public void execute(KeyRunnable r) { - healthyThreadPool.execute(r); - } + public void shutdownUnhealthy() { + try { + if (!healthyThreadPool.shutdownUnhealthy()) { + logger.warn("BookingQueue: Unhealthy queue terminated, starting a new one"); + initThreadPool(); + } + } catch (InterruptedException e) { + // TODO: evaluate crashing the whole springbook context here + // to force a container restart cycle + logger.error("Failed to restart BookingThreadPool", e); + } + } - public long getRejectedTaskCount() { - return healthyThreadPool.getRejectedTaskCount(); - } + public boolean isHealthy() { + return healthyThreadPool.healthCheck(); + } - public int getQueueCapacity() { - return queueCapacity; - } + public void execute(KeyRunnable r) { + healthyThreadPool.execute(r); + } - public void shutdown() { - healthyThreadPool.shutdown(); - } + public long getRejectedTaskCount() { + return healthyThreadPool.getRejectedTaskCount(); + } - public int getSize() { - return healthyThreadPool.getQueue().size(); - } + public int getQueueCapacity() { + return queueCapacity; + } - public int getRemainingCapacity() { - return healthyThreadPool.getQueue().remainingCapacity(); - } + public void shutdown() { + healthyThreadPool.shutdown(); + } - public int getActiveCount() { - return healthyThreadPool.getActiveCount(); - } + public int getSize() { + return healthyThreadPool.getQueue().size(); + } - public long getCompletedTaskCount() { - return healthyThreadPool.getCompletedTaskCount(); - } + public int getRemainingCapacity() { + return healthyThreadPool.getQueue().remainingCapacity(); + } + + public int getActiveCount() { + return healthyThreadPool.getActiveCount(); + } - public long getCorePoolSize() { - return corePoolSize; - } + public long getCompletedTaskCount() { + return healthyThreadPool.getCompletedTaskCount(); + } - public long getMaximumPoolSize() { - return maxPoolSize; - } + public long getCorePoolSize() { + return corePoolSize; + } + + public long getMaximumPoolSize() { + return maxPoolSize; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java index bb57d45f0..7b3cd911f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java @@ -75,429 +75,437 @@ * For all other exceptions, both the frame and the proc have to be manually removed. */ public class CoreUnitDispatcher implements Dispatcher { - private static final Logger logger = LogManager.getLogger(CoreUnitDispatcher.class); - - private DispatchSupport dispatchSupport; - - private JobManager jobManager; - - private RqdClient rqdClient; - - private HostManager hostManager; - - public boolean testMode = false; - - private final long MEM_RESERVED_MIN; - private final long MEM_GPU_RESERVED_DEFAULT; - private final long MEM_GPU_RESERVED_MIN; - - private Environment env; - - /* - * Keeps a map of unique job IDs that should be skipped over for booking until the record has - * expired. - */ - private Cache jobLock; - - @Autowired - public CoreUnitDispatcher(Environment env) { - this.env = env; - MEM_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_reserved_min"); - MEM_GPU_RESERVED_DEFAULT = getLongProperty("dispatcher.memory.mem_gpu_reserved_default"); - MEM_GPU_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_gpu_reserved_min"); - } - - /* - * Return an integer value from the opencue.properties given a key - */ - private int getIntProperty(String property) { - return env.getRequiredProperty(property, Integer.class); - } - - /* - * Return an integer value from the opencue.properties given a key - */ - private long getLongProperty(String property) { - return env.getRequiredProperty(property, Long.class); - } - - private Cache getOrCreateJobLock() { - if (jobLock == null) { - this.jobLock = CacheBuilder.newBuilder() - .concurrencyLevel(getIntProperty("dispatcher.job_lock_concurrency_level")) - .expireAfterWrite(getIntProperty("dispatcher.job_lock_expire_seconds"), TimeUnit.SECONDS) - .build(); - } - return jobLock; - } + private static final Logger logger = LogManager.getLogger(CoreUnitDispatcher.class); - private List dispatchJobs(DispatchHost host, Set jobs) { - List procs = new ArrayList(); + private DispatchSupport dispatchSupport; - try { - for (String jobid : jobs) { + private JobManager jobManager; - if (!host.hasAdditionalResources(CORE_POINTS_RESERVED_MIN, MEM_RESERVED_MIN, - GPU_UNITS_RESERVED_MIN, MEM_GPU_RESERVED_MIN)) { - return procs; - } + private RqdClient rqdClient; - if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { - break; - } + private HostManager hostManager; + + public boolean testMode = false; + + private final long MEM_RESERVED_MIN; + private final long MEM_GPU_RESERVED_DEFAULT; + private final long MEM_GPU_RESERVED_MIN; + + private Environment env; - if (getIntProperty("dispatcher.job_lock_expire_seconds") > 0) { - if (getOrCreateJobLock().getIfPresent(jobid) != null) { - continue; - } + /* + * Keeps a map of unique job IDs that should be skipped over for booking until the record has + * expired. 
+ */ + private Cache jobLock; + + @Autowired + public CoreUnitDispatcher(Environment env) { + this.env = env; + MEM_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_reserved_min"); + MEM_GPU_RESERVED_DEFAULT = getLongProperty("dispatcher.memory.mem_gpu_reserved_default"); + MEM_GPU_RESERVED_MIN = getLongProperty("dispatcher.memory.mem_gpu_reserved_min"); + } + + /* + * Return an integer value from the opencue.properties given a key + */ + private int getIntProperty(String property) { + return env.getRequiredProperty(property, Integer.class); + } + + /* + * Return an integer value from the opencue.properties given a key + */ + private long getLongProperty(String property) { + return env.getRequiredProperty(property, Long.class); + } - jobLock.put(jobid, jobid); + private Cache getOrCreateJobLock() { + if (jobLock == null) { + this.jobLock = CacheBuilder.newBuilder() + .concurrencyLevel(getIntProperty("dispatcher.job_lock_concurrency_level")) + .expireAfterWrite(getIntProperty("dispatcher.job_lock_expire_seconds"), + TimeUnit.SECONDS) + .build(); } + return jobLock; + } + + private List dispatchJobs(DispatchHost host, Set jobs) { + List procs = new ArrayList(); - DispatchJob job = jobManager.getDispatchJob(jobid); try { - procs.addAll(dispatchHost(host, job)); - } catch (JobDispatchException e) { - logger.info("job dispatch exception," + e); + for (String jobid : jobs) { + + if (!host.hasAdditionalResources(CORE_POINTS_RESERVED_MIN, MEM_RESERVED_MIN, + GPU_UNITS_RESERVED_MIN, MEM_GPU_RESERVED_MIN)) { + return procs; + } + + if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { + break; + } + + if (getIntProperty("dispatcher.job_lock_expire_seconds") > 0) { + if (getOrCreateJobLock().getIfPresent(jobid) != null) { + continue; + } + + jobLock.put(jobid, jobid); + } + + DispatchJob job = jobManager.getDispatchJob(jobid); + try { + procs.addAll(dispatchHost(host, job)); + } catch (JobDispatchException e) { + logger.info("job dispatch exception," + e); + } + } + + } catch (DispatcherException e) { + logger.info(host.name + " dispatcher exception," + e); } - } - } catch (DispatcherException e) { - logger.info(host.name + " dispatcher exception," + e); - } + host.restoreGpu(); - host.restoreGpu(); + return procs; + } - return procs; - } + private Set getGpuJobs(DispatchHost host, ShowInterface show) { + Set jobs = null; + + // If the host has gpu idle, first do a query to find gpu jobs + // If no gpu jobs found remove resources to leave room for a gpu frame + if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_DEFAULT, + this.MEM_RESERVED_MIN, Dispatcher.GPU_UNITS_RESERVED_DEFAULT, + this.MEM_GPU_RESERVED_DEFAULT)) { + if (show == null) + jobs = dispatchSupport.findDispatchJobs(host, + getIntProperty("dispatcher.job_query_max")); + else + jobs = dispatchSupport.findDispatchJobs(host, show, + getIntProperty("dispatcher.job_query_max")); + + if (jobs.size() == 0) { + host.removeGpu(); + jobs = null; + } + } - private Set getGpuJobs(DispatchHost host, ShowInterface show) { - Set jobs = null; + return jobs; + } - // If the host has gpu idle, first do a query to find gpu jobs - // If no gpu jobs found remove resources to leave room for a gpu frame - if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_DEFAULT, this.MEM_RESERVED_MIN, - Dispatcher.GPU_UNITS_RESERVED_DEFAULT, this.MEM_GPU_RESERVED_DEFAULT)) { - if (show == null) - jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); - else - jobs = 
dispatchSupport.findDispatchJobs(host, show, - getIntProperty("dispatcher.job_query_max")); + @Override + public List dispatchHostToAllShows(DispatchHost host) { + Set jobs = dispatchSupport.findDispatchJobsForAllShows(host, + getIntProperty("dispatcher.job_query_max")); - if (jobs.size() == 0) { - host.removeGpu(); - jobs = null; - } + return dispatchJobs(host, jobs); } - return jobs; - } + @Override + public List dispatchHost(DispatchHost host) { - @Override - public List dispatchHostToAllShows(DispatchHost host) { - Set jobs = dispatchSupport.findDispatchJobsForAllShows(host, - getIntProperty("dispatcher.job_query_max")); + Set jobs = getGpuJobs(host, null); - return dispatchJobs(host, jobs); - } + if (jobs == null) + jobs = dispatchSupport.findDispatchJobs(host, + getIntProperty("dispatcher.job_query_max")); - @Override - public List dispatchHost(DispatchHost host) { + return dispatchJobs(host, jobs); + } - Set jobs = getGpuJobs(host, null); + @Override + public List dispatchHost(DispatchHost host, ShowInterface show) { - if (jobs == null) - jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); + Set jobs = getGpuJobs(host, show); - return dispatchJobs(host, jobs); - } + if (jobs == null) + jobs = dispatchSupport.findDispatchJobs(host, show, + getIntProperty("dispatcher.job_query_max")); - @Override - public List dispatchHost(DispatchHost host, ShowInterface show) { + return dispatchJobs(host, jobs); + } - Set jobs = getGpuJobs(host, show); + @Override + public List dispatchHost(DispatchHost host, GroupInterface group) { - if (jobs == null) - jobs = - dispatchSupport.findDispatchJobs(host, show, getIntProperty("dispatcher.job_query_max")); + Set jobs = getGpuJobs(host, null); - return dispatchJobs(host, jobs); - } + if (jobs == null) + jobs = dispatchSupport.findDispatchJobs(host, group); - @Override - public List dispatchHost(DispatchHost host, GroupInterface group) { + return dispatchJobs(host, jobs); + } - Set jobs = getGpuJobs(host, null); + @Override + public List dispatchHost(DispatchHost host, JobInterface job) { - if (jobs == null) - jobs = dispatchSupport.findDispatchJobs(host, group); + List procs = new ArrayList(); - return dispatchJobs(host, jobs); - } + if (host.strandedCores == 0 && dispatchSupport.isShowAtOrOverBurst(job, host)) { + return procs; + } - @Override - public List dispatchHost(DispatchHost host, JobInterface job) { + List frames = dispatchSupport.findNextDispatchFrames(job, host, + getIntProperty("dispatcher.frame_query_max")); + + logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " + + host.idleCores + "/" + host.idleMemory + " on job " + job.getName()); + + String[] selfishServices = + env.getProperty("dispatcher.frame.selfish.services", "").split(","); + for (DispatchFrame frame : frames) { + + VirtualProc proc = VirtualProc.build(host, frame, selfishServices); + + if (frame.minCores <= 0 && !proc.canHandleNegativeCoresRequest) { + logger.debug("Cannot dispatch job, host is busy."); + break; + } + + if (host.idleCores < host.handleNegativeCoresRequirement(frame.minCores) + || host.idleMemory < frame.getMinMemory() || host.idleGpus < frame.minGpus + || host.idleGpuMemory < frame.minGpuMemory) { + logger.debug("Cannot dispatch, insufficient resources."); + break; + } + + if (!dispatchSupport.isJobBookable(job, proc.coresReserved, proc.gpusReserved)) { + break; + } + + if (host.strandedCores == 0 && dispatchSupport.isShowAtOrOverBurst(job, host)) { + return procs; + } + + boolean success = 
new DispatchFrameTemplate(proc, job, frame, false) { + public void wrapDispatchFrame() { + logger.debug("Dispatching frame with " + frame.minCores + + " minCores on proc with " + proc.coresReserved + " coresReserved"); + dispatch(frame, proc); + dispatchSummary(proc, frame, "Booking"); + return; + } + }.execute(); + + if (success) { + procs.add(proc); + + DispatchSupport.bookedProcs.getAndIncrement(); + DispatchSupport.bookedCores.addAndGet(proc.coresReserved); + DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); + + if (host.strandedCores > 0) { + dispatchSupport.pickupStrandedCores(host); + break; + } + + host.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved); + if (!host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, + MEM_RESERVED_MIN, Dispatcher.GPU_UNITS_RESERVED_MIN, + MEM_GPU_RESERVED_MIN)) { + break; + } else if (procs.size() >= getIntProperty("dispatcher.job_frame_dispatch_max")) { + break; + } else if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { + break; + } + } + } - List procs = new ArrayList(); + return procs; - if (host.strandedCores == 0 && dispatchSupport.isShowAtOrOverBurst(job, host)) { - return procs; } - List frames = dispatchSupport.findNextDispatchFrames(job, host, - getIntProperty("dispatcher.frame_query_max")); - - logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " - + host.idleCores + "/" + host.idleMemory + " on job " + job.getName()); - - String[] selfishServices = env.getProperty("dispatcher.frame.selfish.services", "").split(","); - for (DispatchFrame frame : frames) { + public void dispatchProcToJob(VirtualProc proc, JobInterface job) { + + // Do not throttle this method + for (DispatchFrame frame : dispatchSupport.findNextDispatchFrames(job, proc, + getIntProperty("dispatcher.frame_query_max"))) { + try { + boolean success = new DispatchFrameTemplate(proc, job, frame, true) { + public void wrapDispatchFrame() { + dispatch(frame, proc); + dispatchSummary(proc, frame, "Dispatch"); + return; + } + }.execute(); + if (success) + return; + } catch (DispatcherException e) { + return; + } + } - VirtualProc proc = VirtualProc.build(host, frame, selfishServices); + dispatchSupport.unbookProc(proc); + } - if (frame.minCores <= 0 && !proc.canHandleNegativeCoresRequest) { - logger.debug("Cannot dispatch job, host is busy."); - break; - } + @Override + public List dispatchHost(DispatchHost host, LayerInterface layer) { + throw new RuntimeException("not implemented)"); + } - if (host.idleCores < host.handleNegativeCoresRequirement(frame.minCores) - || host.idleMemory < frame.getMinMemory() || host.idleGpus < frame.minGpus - || host.idleGpuMemory < frame.minGpuMemory) { - logger.debug("Cannot dispatch, insufficient resources."); - break; - } + @Override + public List dispatchHost(DispatchHost host, FrameInterface frame) { + throw new RuntimeException("not implemented)"); + } - if (!dispatchSupport.isJobBookable(job, proc.coresReserved, proc.gpusReserved)) { - break; - } + @Override + public void dispatch(DispatchFrame frame, VirtualProc proc) { + // Allocate frame on the database + dispatchSupport.startFrameAndProc(proc, frame); - if (host.strandedCores == 0 && dispatchSupport.isShowAtOrOverBurst(job, host)) { - return procs; - } - - boolean success = new DispatchFrameTemplate(proc, job, frame, false) { - public void wrapDispatchFrame() { - logger.debug("Dispatching frame with " + frame.minCores + " minCores on proc with " - + proc.coresReserved + 
" coresReserved"); - dispatch(frame, proc); - dispatchSummary(proc, frame, "Booking"); - return; + // Communicate with RQD to run the frame. + if (!testMode) { + dispatchSupport.runFrame(proc, frame); } - }.execute(); + } - if (success) { - procs.add(proc); + @Override + public boolean isTestMode() { + return testMode; + } - DispatchSupport.bookedProcs.getAndIncrement(); - DispatchSupport.bookedCores.addAndGet(proc.coresReserved); - DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); + @Override + public void setTestMode(boolean enabled) { + testMode = enabled; + dispatchSupport.clearCache(); + } - if (host.strandedCores > 0) { - dispatchSupport.pickupStrandedCores(host); - break; - } + /** + * Log a summary of each dispatch. + * + * @param p the VirtualProc that was used + * @param f the DispatchFrame that that was used + * @param type the type of dispatch + */ + private void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { + String msg = type + " summary: " + p.coresReserved + " cores / " + + CueUtil.KbToMb(p.memoryReserved) + " memory / " + p.gpusReserved + " gpus / " + + CueUtil.KbToMb(p.gpuMemoryReserved) + " gpu memory " + p.getName() + " to " + + f.show + "/" + f.shot; + logger.trace(msg); + } - host.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, - proc.gpuMemoryReserved); - if (!host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, MEM_RESERVED_MIN, - Dispatcher.GPU_UNITS_RESERVED_MIN, MEM_GPU_RESERVED_MIN)) { - break; - } else if (procs.size() >= getIntProperty("dispatcher.job_frame_dispatch_max")) { - break; - } else if (procs.size() >= getIntProperty("dispatcher.host_frame_dispatch_max")) { - break; - } - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; } - return procs; - - } - - public void dispatchProcToJob(VirtualProc proc, JobInterface job) { - - // Do not throttle this method - for (DispatchFrame frame : dispatchSupport.findNextDispatchFrames(job, proc, - getIntProperty("dispatcher.frame_query_max"))) { - try { - boolean success = new DispatchFrameTemplate(proc, job, frame, true) { - public void wrapDispatchFrame() { - dispatch(frame, proc); - dispatchSummary(proc, frame, "Dispatch"); - return; - } - }.execute(); - if (success) - return; - } catch (DispatcherException e) { - return; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; } - dispatchSupport.unbookProc(proc); - } + public JobManager getJobManager() { + return jobManager; + } - @Override - public List dispatchHost(DispatchHost host, LayerInterface layer) { - throw new RuntimeException("not implemented)"); - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } - @Override - public List dispatchHost(DispatchHost host, FrameInterface frame) { - throw new RuntimeException("not implemented)"); - } + public HostManager getHostManager() { + return hostManager; + } - @Override - public void dispatch(DispatchFrame frame, VirtualProc proc) { - // Allocate frame on the database - dispatchSupport.startFrameAndProc(proc, frame); + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } - // Communicate with RQD to run the frame. 
- if (!testMode) { - dispatchSupport.runFrame(proc, frame); + public RqdClient getRqdClient() { + return rqdClient; } - } - - @Override - public boolean isTestMode() { - return testMode; - } - - @Override - public void setTestMode(boolean enabled) { - testMode = enabled; - dispatchSupport.clearCache(); - } - - /** - * Log a summary of each dispatch. - * - * @param p the VirtualProc that was used - * @param f the DispatchFrame that that was used - * @param type the type of dispatch - */ - private void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { - String msg = - type + " summary: " + p.coresReserved + " cores / " + CueUtil.KbToMb(p.memoryReserved) - + " memory / " + p.gpusReserved + " gpus / " + CueUtil.KbToMb(p.gpuMemoryReserved) - + " gpu memory " + p.getName() + " to " + f.show + "/" + f.shot; - logger.trace(msg); - } - - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } - - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } - - private abstract class DispatchFrameTemplate { - protected VirtualProc proc; - protected JobInterface job; - protected DispatchFrame frame; - boolean procIndb = true; - - public DispatchFrameTemplate(VirtualProc p, JobInterface j, DispatchFrame f, boolean inDb) { - proc = p; - job = j; - frame = f; - procIndb = inDb; + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; } - public abstract void wrapDispatchFrame(); - - public boolean execute() { - try { - wrapDispatchFrame(); - } catch (FrameReservationException fre) { - /* - * This usually just means another thread got the frame first, so just retry on the next - * frame. - */ - DispatchSupport.bookingRetries.incrementAndGet(); - String msg = - "frame reservation error, " + "dispatchProcToJob failed to book next frame, " + fre; - logger.info(msg); - return false; - } catch (ResourceDuplicationFailureException rrfe) { - /* - * There is a resource already assigned to the frame we reserved! Don't clear the frame, let - * it keep running and continue to the next frame. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - dispatchSupport.fixFrame(frame); - - String msg = "proc update error, dispatchProcToJob failed " + "to assign proc to job " + job - + ", " + proc + " already assigned to another frame." + rrfe; - - logger.info(msg); - return false; - } catch (ResourceReservationFailureException rrfe) { - /* - * This should technically never happen since the proc is already allocated at this point, - * but, if it does it should be unbooked. 
- */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = "proc update error, " + "dispatchProcToJob failed to assign proc to job " + job - + ", " + rrfe; - logger.info(msg); - if (procIndb) { - dispatchSupport.unbookProc(proc); + private abstract class DispatchFrameTemplate { + protected VirtualProc proc; + protected JobInterface job; + protected DispatchFrame frame; + boolean procIndb = true; + + public DispatchFrameTemplate(VirtualProc p, JobInterface j, DispatchFrame f, boolean inDb) { + proc = p; + job = j; + frame = f; + procIndb = inDb; } - dispatchSupport.clearFrame(frame); - /* Throw an exception to stop booking **/ - throw new DispatcherException( - "host reservation error, " + "dispatchHostToJob failed to allocate a new proc " + rrfe); - } catch (Exception e) { - /* - * Everything else means that the host/frame record was updated but another error occurred - * and the proc should be cleared. It could also be running, so use the jobManagerSupprot to - * kill it just in case. - */ - DispatchSupport.bookingErrors.incrementAndGet(); - String msg = "dispatchProcToJob failed booking proc " + proc + " on job " + job; - logger.info(msg, e); - dispatchSupport.unbookProc(proc); - dispatchSupport.clearFrame(frame); - try { - rqdClient.killFrame(proc, "An accounting error occured " + "when booking this frame."); - } catch (RqdClientException rqde) { - /* - * Its almost expected that this will fail, as this is just a precaution if the frame did - * actually launch. - */ + public abstract void wrapDispatchFrame(); + + public boolean execute() { + try { + wrapDispatchFrame(); + } catch (FrameReservationException fre) { + /* + * This usually just means another thread got the frame first, so just retry on the + * next frame. + */ + DispatchSupport.bookingRetries.incrementAndGet(); + String msg = "frame reservation error, " + + "dispatchProcToJob failed to book next frame, " + fre; + logger.info(msg); + return false; + } catch (ResourceDuplicationFailureException rrfe) { + /* + * There is a resource already assigned to the frame we reserved! Don't clear the + * frame, let it keep running and continue to the next frame. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + dispatchSupport.fixFrame(frame); + + String msg = + "proc update error, dispatchProcToJob failed " + "to assign proc to job " + + job + ", " + proc + " already assigned to another frame." + rrfe; + + logger.info(msg); + return false; + } catch (ResourceReservationFailureException rrfe) { + /* + * This should technically never happen since the proc is already allocated at this + * point, but, if it does it should be unbooked. + */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "proc update error, " + + "dispatchProcToJob failed to assign proc to job " + job + ", " + rrfe; + logger.info(msg); + if (procIndb) { + dispatchSupport.unbookProc(proc); + } + dispatchSupport.clearFrame(frame); + /* Throw an exception to stop booking **/ + throw new DispatcherException("host reservation error, " + + "dispatchHostToJob failed to allocate a new proc " + rrfe); + } catch (Exception e) { + /* + * Everything else means that the host/frame record was updated but another error + * occurred and the proc should be cleared. It could also be running, so use the + * jobManagerSupprot to kill it just in case. 
+ */ + DispatchSupport.bookingErrors.incrementAndGet(); + String msg = "dispatchProcToJob failed booking proc " + proc + " on job " + job; + logger.info(msg, e); + dispatchSupport.unbookProc(proc); + dispatchSupport.clearFrame(frame); + + try { + rqdClient.killFrame(proc, + "An accounting error occured " + "when booking this frame."); + } catch (RqdClientException rqde) { + /* + * Its almost expected that this will fail, as this is just a precaution if the + * frame did actually launch. + */ + } + /* Thrown an exception to stop booking */ + throw new DispatcherException("stopped dispatching host, " + e); + } + + return true; } - /* Thrown an exception to stop booking */ - throw new DispatcherException("stopped dispatching host, " + e); - } - - return true; } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java index 25097de7a..7f1ec1c01 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java @@ -24,75 +24,76 @@ public class DispatchQueue implements QueueHealthCheck { - private int healthThreshold; - private int minUnhealthyPeriodMin; - private int queueCapacity; - private int corePoolSize; - private int maxPoolSize; - - private static final Logger logger = LogManager.getLogger("HEALTH"); - private String name = "Default"; - private HealthyThreadPool healthyDispatchPool; - - public DispatchQueue(String name, int healthThreshold, int minUnhealthyPeriodMin, - int queueCapacity, int corePoolSize, int maxPoolSize) { - this.name = name; - this.healthThreshold = healthThreshold; - this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; - this.queueCapacity = queueCapacity; - this.corePoolSize = corePoolSize; - this.maxPoolSize = maxPoolSize; - initThreadPool(); - } - - public void initThreadPool() { - healthyDispatchPool = new HealthyThreadPool(name, healthThreshold, minUnhealthyPeriodMin, - queueCapacity, corePoolSize, maxPoolSize); - } - - public void shutdownUnhealthy() { - try { - if (!healthyDispatchPool.shutdownUnhealthy()) { - logger.warn("DispatchQueue_" + name + ": Unhealthy queue terminated, starting a new one"); + private int healthThreshold; + private int minUnhealthyPeriodMin; + private int queueCapacity; + private int corePoolSize; + private int maxPoolSize; + + private static final Logger logger = LogManager.getLogger("HEALTH"); + private String name = "Default"; + private HealthyThreadPool healthyDispatchPool; + + public DispatchQueue(String name, int healthThreshold, int minUnhealthyPeriodMin, + int queueCapacity, int corePoolSize, int maxPoolSize) { + this.name = name; + this.healthThreshold = healthThreshold; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.queueCapacity = queueCapacity; + this.corePoolSize = corePoolSize; + this.maxPoolSize = maxPoolSize; initThreadPool(); - } - } catch (InterruptedException e) { - // TODO: evaluate crashing the whole springbook context here - // to force a container restart cycle - logger.error("DispatchQueue_" + name + ":Failed to restart DispatchThreadPool", e); } - } - public boolean isHealthy() { - return healthyDispatchPool.healthCheck(); - } + public void initThreadPool() { + healthyDispatchPool = new HealthyThreadPool(name, healthThreshold, minUnhealthyPeriodMin, + queueCapacity, corePoolSize, maxPoolSize); + } + + public void shutdownUnhealthy() { + try { + if (!healthyDispatchPool.shutdownUnhealthy()) { + 
logger.warn("DispatchQueue_" + name + + ": Unhealthy queue terminated, starting a new one"); + initThreadPool(); + } + } catch (InterruptedException e) { + // TODO: evaluate crashing the whole springbook context here + // to force a container restart cycle + logger.error("DispatchQueue_" + name + ":Failed to restart DispatchThreadPool", e); + } + } - public void execute(KeyRunnable r) { - healthyDispatchPool.execute(r); - } + public boolean isHealthy() { + return healthyDispatchPool.healthCheck(); + } + + public void execute(KeyRunnable r) { + healthyDispatchPool.execute(r); + } - public long getRejectedTaskCount() { - return healthyDispatchPool.getRejectedTaskCount(); - } + public long getRejectedTaskCount() { + return healthyDispatchPool.getRejectedTaskCount(); + } - public void shutdown() { - healthyDispatchPool.shutdown(); - } + public void shutdown() { + healthyDispatchPool.shutdown(); + } - public int getSize() { - return healthyDispatchPool.getQueue().size(); - } + public int getSize() { + return healthyDispatchPool.getQueue().size(); + } - public int getRemainingCapacity() { - return healthyDispatchPool.getQueue().remainingCapacity(); - } + public int getRemainingCapacity() { + return healthyDispatchPool.getQueue().remainingCapacity(); + } - public int getActiveCount() { - return healthyDispatchPool.getActiveCount(); - } + public int getActiveCount() { + return healthyDispatchPool.getActiveCount(); + } - public long getCompletedTaskCount() { - return healthyDispatchPool.getCompletedTaskCount(); - } + public long getCompletedTaskCount() { + return healthyDispatchPool.getCompletedTaskCount(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java index f7d51442b..1c5e5e26f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueueTaskRejectionException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class DispatchQueueTaskRejectionException extends SpcueRuntimeException { - public DispatchQueueTaskRejectionException() { - // TODO Auto-generated constructor stub - } - - public DispatchQueueTaskRejectionException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public DispatchQueueTaskRejectionException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public DispatchQueueTaskRejectionException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public DispatchQueueTaskRejectionException() { + // TODO Auto-generated constructor stub + } + + public DispatchQueueTaskRejectionException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public DispatchQueueTaskRejectionException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public DispatchQueueTaskRejectionException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java index 2df583109..663698c1d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java @@ -42,478 +42,480 @@ */ public interface DispatchSupport { - /** - * Number of procs rebalanced - */ - static final AtomicLong balanceSuccess = new AtomicLong(0); - - /** - * Number of procs - */ - static final AtomicLong balanceFailed = new AtomicLong(0); - - /** - * Number of times the worst offender was killed - */ - static final AtomicLong killedOffenderProcs = new AtomicLong(0); - - /** - * Number of frames killed because the machine totally ran out of memory. This may or may not be - * the worst offender. - */ - static final AtomicLong killedOomProcs = new AtomicLong(0); - - /** - * Long for counting how many procs have been dispatched - */ - static final AtomicLong dispatchedProcs = new AtomicLong(0); - - /** - * Long for counting how many cores have been booked - */ - static final AtomicLong bookedCores = new AtomicLong(0); - - /** - * Long for counting how many gpus have been booked - */ - static final AtomicLong bookedGpus = new AtomicLong(0); - - /** - * Long for counting how many procs have been booked - */ - static final AtomicLong bookedProcs = new AtomicLong(0); - - /** - * Long for counting unbooked procs. - */ - static final AtomicLong unbookedProcs = new AtomicLong(0); - - /** - * A proc is cleared when an error occurs - */ - static final AtomicLong clearedProcs = new AtomicLong(0); - - /** - * Long for counting dispatch errors - */ - static final AtomicLong bookingErrors = new AtomicLong(0); - - /** - * Long for counting dispatch retries - */ - static final AtomicLong bookingRetries = new AtomicLong(0); - - /** - * Incremented when RQD and the Cue DB are out of sync. - */ - static final AtomicLong accountingErrors = new AtomicLong(0); - - /** - * Incremented when RQD and the Cue DB are out of sync. - */ - static final AtomicLong fixedFrames = new AtomicLong(0); - - /** - * Count number of picked up cores. - */ - static final AtomicLong pickedUpCoresCount = new AtomicLong(0); - - /** - * Count number of stranded cores. - */ - static final AtomicLong strandedCoresCount = new AtomicLong(0); - - /** - * Count number of picked up gpus. - */ - static final AtomicLong pickedUpGpusCount = new AtomicLong(0); - - /** - * Count number of stranded gpus. - */ - static final AtomicLong strandedGpusCount = new AtomicLong(0); - - /** - * Set the proc's frame assignment to null; - * - * @param proc - * @return - */ - boolean clearVirtualProcAssignement(ProcInterface proc); - - /** - * Stops the specified frame and sets a new frame state and exit status. - * - * @param frame - * @param state - * @param exitStatus - */ - boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus); - - /** - * Updates a frame with completed stats. - * - * @param frame - * @param state - * @param exitStatus - * @param maxrss - * @return - */ - boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus, long maxrss); - - /** - * Updates the frame to the Running state and Reserve the resources in the specified proc for the - * specified frame. If the proc does not exist, its inserted, otherwise its updated. - * - * When a proc is created, the subscription, host, job, layer, folder, and shot proc counts get - * updated. This may cause some contention. - * - * @param proc - * @param frame - */ - public void startFrameAndProc(VirtualProc proc, DispatchFrame frame); - - /** - * This method clears out a proc that was lost track of. 
This can happen if the host fails and the - * proc fails to report in, a network outage occurs, or something of that nature. - * - * @param proc - * @param reason - * @param exitStatus - */ - void lostProc(VirtualProc proc, String reason, int exitStatus); - - /** - * Unbooks a proc with no message - * - * @param proc - */ - void unbookProc(VirtualProc proc); - - /** - * Unbooks a virtual proc. Takes a reason which is printed to the console. - */ - void unbookProc(VirtualProc proc, String reason); - - /** - * Returns the next N frames to be dispatched from the specified job. - * - * @param job - * @param proc - * @param limit - * @return - */ - List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); - - /** - * - * Returns the next N frames to be dispatched from the specified job. - * - * @param job - * @param host - * @param limit - * @return - */ - List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); - - /** - * Return the next N frames to be dispatched from the specified layer. - * - * @param layer - * @param host - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); - - /** - * Return the next N frames to be dispatched from the specified layer. - * - * @param layer - * @param proc - * @param limit - * @return - */ - List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, int limit); - - /** - * - * @param excludeJob - * @param proc - * @return - */ - boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); - - /** - * Return true if there are higher priority jobs to run. - * - * @param baseJob - * @param proc - * @return boolean - */ - boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); - - /** - * Run the frame on the specified proc. - * - * @param proc - * @param frame - * @throws DispatcherException if an error occurs during dispatching - */ - void runFrame(VirtualProc proc, DispatchFrame frame); - - /** - * Return true if the specified show is over its burst size of the given proc's allocation. - * - * @param proc - * @return - */ - boolean isShowOverBurst(VirtualProc proc); - - /** - * Returns the job that can utilize the specified host. - * - * @param host - * @return - */ - Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); - - /** - * Returns the highest priority job that can utilize the specified host - * - * @param host - * @return - */ - Set findDispatchJobs(DispatchHost host, int numJobs); - - /** - * Returns the highest priority jobs that can utilize the specified host in the specified group. - * - * @param host - * @return A set of unique job ids. - */ - Set findDispatchJobs(DispatchHost host, GroupInterface p); - - /** - * - * @param host - * @return A set of unique job ids. - */ - Set findLocalDispatchJobs(DispatchHost host); - - /** - * Creates and returns and RQD RunFrame object. - * - * Once the RunFrame object is created string replacement is done in the frame command to replace - * any tags in the command. - * - * Currently these tags are supported: [IFRAME] - integer frame (no padding) [ZFRAME] - 4 padded - * frame [LAYER] - the layer name [JOB] - the job name [IFRAME] - the full frame name [JOB] - the - * job name [LAYER] - the layer name - * - * @param proc - * @param frame - * @return RunFrame - */ - RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame); - - /** - * Checks to see if job passes basic tests for dispatchability. 
Tests include if the proc is over - * its max, if it has pending frames, and if its paused. - * - * @param job - * @param local indicates a local dispatch or not - * @return boolean - */ - boolean isJobDispatchable(JobInterface job, boolean local); - - /** - * returns true of the cue has jobs with pending frames that are not paused or in a non bookable - * state. - * - * @return - */ - boolean isCueBookable(FacilityInterface f); - - /** - * Increases the amount of memory reserved for a running frame. Returns true if the memory value - * actually increased. If the value is lower than current reserved memory it is ignored. - * - * @param proc - * @param value - */ - boolean increaseReservedMemory(ProcInterface proc, long value); - - /** - * Attempts to balance the reserved memory on a proc by taking away reserved memory from frames - * that are well under their reservation. - * - * @param proc - * @param value - */ - boolean balanceReservedMemory(ProcInterface proc, long value); - - /** - * Update the jobs usage counters. - * - * @param frame - * @param exitStatus - */ - void updateUsageCounters(FrameInterface frame, int exitStatus); - - /** - * Sets a frame to running if there is a proc with the frame. - * - * @param frame - */ - void fixFrame(DispatchFrame frame); - - /** - * Sets the frame state to waiting for a frame with no running proc. - * - * @param frame - */ - void clearFrame(DispatchFrame frame); - - /** - * Sets the frame state exitStatus to EXIT_STATUS_MEMORY_FAILURE - * - * @param frame - * @return whether the frame has been updated - */ - boolean updateFrameMemoryError(FrameInterface frame); - - /** - * Update Memory usage data and LLU time for the given frame. - * - * @param frame - * @param rss - * @param maxRss - * @param lluTime - */ - void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, long lluTime); - - /** - * Update memory usage data for a given frame's proc record. The frame is used to update the proc - * so the update fails if the proc has been rebooked onto a new frame. - * - * @param frame - * @param rss - * @param maxRss - * @param vsize - * @param maxVsize - * @param usedGpuMemory - * @param maxUsedGpuMemory - */ - void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, long maxVsize, - long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, byte[] children); - - /** - * Return true if adding the given core units would put the show over its burst value. - * - * @param show - * @param alloc - * @param coreUnits - * @return - */ - boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); - - /** - * Return true if the job can take new procs. - * - * @param job - * @return - */ - boolean isJobBookable(JobInterface job); - - /** - * Return true if the job can take the given number of new core units. - * - * @param job - * @return - */ - boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits); - - /** - * Return true if the specified show is at or over its burst value for the given allocation. - * - * @param show - * @param alloc - * @return - */ - boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); - - /** - * Return true if the specified show is over its guaranteed subscription size. - * - * @param proc - * @return - */ - boolean isShowOverSize(VirtualProc proc); - - /** - * Pickup any cores that were stranded on the given host. 
- * - * @param host - */ - void pickupStrandedCores(DispatchHost host); - - /** - * Return true if the host has stranded cores. - * - * @param host - * @return - */ - boolean hasStrandedCores(HostInterface host); - - /** - * Add stranded cores for the given host. Stranded cores will automatically be added to the next - * frame dispatched from the host to make up for cores stranded with no memory. - * - * @param host - * @param cores - */ - void strandCores(DispatchHost host, int cores); - - /** - * Lowers the perceived idle cores on a machine if the load is over certain threshold. - * - * @param host - * @param load - */ - void determineIdleCores(DispatchHost host, int load); - - /** - * Return a set of job IDs that can take the given host. - * - * @param host - * @param show - * @param numJobs - * @return - */ - Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); - - /** - * Return true of the job has pending frames. - * - * @param job - * @return - */ - boolean hasPendingFrames(JobInterface job); - - /** - * Return true if the layer has pending frames. - * - * @param layer - * @return - */ - boolean hasPendingFrames(LayerInterface layer); - - /** - * Clear bookableShows cache - * - * @return - */ - void clearCache(); + /** + * Number of procs rebalanced + */ + static final AtomicLong balanceSuccess = new AtomicLong(0); + + /** + * Number of procs + */ + static final AtomicLong balanceFailed = new AtomicLong(0); + + /** + * Number of times the worst offender was killed + */ + static final AtomicLong killedOffenderProcs = new AtomicLong(0); + + /** + * Number of frames killed because the machine totally ran out of memory. This may or may not be + * the worst offender. + */ + static final AtomicLong killedOomProcs = new AtomicLong(0); + + /** + * Long for counting how many procs have been dispatched + */ + static final AtomicLong dispatchedProcs = new AtomicLong(0); + + /** + * Long for counting how many cores have been booked + */ + static final AtomicLong bookedCores = new AtomicLong(0); + + /** + * Long for counting how many gpus have been booked + */ + static final AtomicLong bookedGpus = new AtomicLong(0); + + /** + * Long for counting how many procs have been booked + */ + static final AtomicLong bookedProcs = new AtomicLong(0); + + /** + * Long for counting unbooked procs. + */ + static final AtomicLong unbookedProcs = new AtomicLong(0); + + /** + * A proc is cleared when an error occurs + */ + static final AtomicLong clearedProcs = new AtomicLong(0); + + /** + * Long for counting dispatch errors + */ + static final AtomicLong bookingErrors = new AtomicLong(0); + + /** + * Long for counting dispatch retries + */ + static final AtomicLong bookingRetries = new AtomicLong(0); + + /** + * Incremented when RQD and the Cue DB are out of sync. + */ + static final AtomicLong accountingErrors = new AtomicLong(0); + + /** + * Incremented when RQD and the Cue DB are out of sync. + */ + static final AtomicLong fixedFrames = new AtomicLong(0); + + /** + * Count number of picked up cores. + */ + static final AtomicLong pickedUpCoresCount = new AtomicLong(0); + + /** + * Count number of stranded cores. + */ + static final AtomicLong strandedCoresCount = new AtomicLong(0); + + /** + * Count number of picked up gpus. + */ + static final AtomicLong pickedUpGpusCount = new AtomicLong(0); + + /** + * Count number of stranded gpus. 
+ */ + static final AtomicLong strandedGpusCount = new AtomicLong(0); + + /** + * Set the proc's frame assignment to null; + * + * @param proc + * @return + */ + boolean clearVirtualProcAssignement(ProcInterface proc); + + /** + * Stops the specified frame and sets a new frame state and exit status. + * + * @param frame + * @param state + * @param exitStatus + */ + boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus); + + /** + * Updates a frame with completed stats. + * + * @param frame + * @param state + * @param exitStatus + * @param maxrss + * @return + */ + boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus, long maxrss); + + /** + * Updates the frame to the Running state and Reserve the resources in the specified proc for + * the specified frame. If the proc does not exist, its inserted, otherwise its updated. + * + * When a proc is created, the subscription, host, job, layer, folder, and shot proc counts get + * updated. This may cause some contention. + * + * @param proc + * @param frame + */ + public void startFrameAndProc(VirtualProc proc, DispatchFrame frame); + + /** + * This method clears out a proc that was lost track of. This can happen if the host fails and + * the proc fails to report in, a network outage occurs, or something of that nature. + * + * @param proc + * @param reason + * @param exitStatus + */ + void lostProc(VirtualProc proc, String reason, int exitStatus); + + /** + * Unbooks a proc with no message + * + * @param proc + */ + void unbookProc(VirtualProc proc); + + /** + * Unbooks a virtual proc. Takes a reason which is printed to the console. + */ + void unbookProc(VirtualProc proc, String reason); + + /** + * Returns the next N frames to be dispatched from the specified job. + * + * @param job + * @param proc + * @param limit + * @return + */ + List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit); + + /** + * + * Returns the next N frames to be dispatched from the specified job. + * + * @param job + * @param host + * @param limit + * @return + */ + List findNextDispatchFrames(JobInterface job, DispatchHost host, int limit); + + /** + * Return the next N frames to be dispatched from the specified layer. + * + * @param layer + * @param host + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); + + /** + * Return the next N frames to be dispatched from the specified layer. + * + * @param layer + * @param proc + * @param limit + * @return + */ + List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, int limit); + + /** + * + * @param excludeJob + * @param proc + * @return + */ + boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc); + + /** + * Return true if there are higher priority jobs to run. + * + * @param baseJob + * @param proc + * @return boolean + */ + boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc); + + /** + * Run the frame on the specified proc. + * + * @param proc + * @param frame + * @throws DispatcherException if an error occurs during dispatching + */ + void runFrame(VirtualProc proc, DispatchFrame frame); + + /** + * Return true if the specified show is over its burst size of the given proc's allocation. + * + * @param proc + * @return + */ + boolean isShowOverBurst(VirtualProc proc); + + /** + * Returns the job that can utilize the specified host. 
+ * + * @param host + * @return + */ + Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); + + /** + * Returns the highest priority job that can utilize the specified host + * + * @param host + * @return + */ + Set findDispatchJobs(DispatchHost host, int numJobs); + + /** + * Returns the highest priority jobs that can utilize the specified host in the specified group. + * + * @param host + * @return A set of unique job ids. + */ + Set findDispatchJobs(DispatchHost host, GroupInterface p); + + /** + * + * @param host + * @return A set of unique job ids. + */ + Set findLocalDispatchJobs(DispatchHost host); + + /** + * Creates and returns and RQD RunFrame object. + * + * Once the RunFrame object is created string replacement is done in the frame command to + * replace any tags in the command. + * + * Currently these tags are supported: [IFRAME] - integer frame (no padding) [ZFRAME] - 4 padded + * frame [LAYER] - the layer name [JOB] - the job name [IFRAME] - the full frame name [JOB] - + * the job name [LAYER] - the layer name + * + * @param proc + * @param frame + * @return RunFrame + */ + RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame); + + /** + * Checks to see if job passes basic tests for dispatchability. Tests include if the proc is + * over its max, if it has pending frames, and if its paused. + * + * @param job + * @param local indicates a local dispatch or not + * @return boolean + */ + boolean isJobDispatchable(JobInterface job, boolean local); + + /** + * returns true of the cue has jobs with pending frames that are not paused or in a non bookable + * state. + * + * @return + */ + boolean isCueBookable(FacilityInterface f); + + /** + * Increases the amount of memory reserved for a running frame. Returns true if the memory value + * actually increased. If the value is lower than current reserved memory it is ignored. + * + * @param proc + * @param value + */ + boolean increaseReservedMemory(ProcInterface proc, long value); + + /** + * Attempts to balance the reserved memory on a proc by taking away reserved memory from frames + * that are well under their reservation. + * + * @param proc + * @param value + */ + boolean balanceReservedMemory(ProcInterface proc, long value); + + /** + * Update the jobs usage counters. + * + * @param frame + * @param exitStatus + */ + void updateUsageCounters(FrameInterface frame, int exitStatus); + + /** + * Sets a frame to running if there is a proc with the frame. + * + * @param frame + */ + void fixFrame(DispatchFrame frame); + + /** + * Sets the frame state to waiting for a frame with no running proc. + * + * @param frame + */ + void clearFrame(DispatchFrame frame); + + /** + * Sets the frame state exitStatus to EXIT_STATUS_MEMORY_FAILURE + * + * @param frame + * @return whether the frame has been updated + */ + boolean updateFrameMemoryError(FrameInterface frame); + + /** + * Update Memory usage data and LLU time for the given frame. + * + * @param frame + * @param rss + * @param maxRss + * @param lluTime + */ + void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, + long lluTime); + + /** + * Update memory usage data for a given frame's proc record. The frame is used to update the + * proc so the update fails if the proc has been rebooked onto a new frame. 
+ * + * @param frame + * @param rss + * @param maxRss + * @param vsize + * @param maxVsize + * @param usedGpuMemory + * @param maxUsedGpuMemory + */ + void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, + long maxVsize, long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, + byte[] children); + + /** + * Return true if adding the given core units would put the show over its burst value. + * + * @param show + * @param alloc + * @param coreUnits + * @return + */ + boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits); + + /** + * Return true if the job can take new procs. + * + * @param job + * @return + */ + boolean isJobBookable(JobInterface job); + + /** + * Return true if the job can take the given number of new core units. + * + * @param job + * @return + */ + boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits); + + /** + * Return true if the specified show is at or over its burst value for the given allocation. + * + * @param show + * @param alloc + * @return + */ + boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc); + + /** + * Return true if the specified show is over its guaranteed subscription size. + * + * @param proc + * @return + */ + boolean isShowOverSize(VirtualProc proc); + + /** + * Pickup any cores that were stranded on the given host. + * + * @param host + */ + void pickupStrandedCores(DispatchHost host); + + /** + * Return true if the host has stranded cores. + * + * @param host + * @return + */ + boolean hasStrandedCores(HostInterface host); + + /** + * Add stranded cores for the given host. Stranded cores will automatically be added to the next + * frame dispatched from the host to make up for cores stranded with no memory. + * + * @param host + * @param cores + */ + void strandCores(DispatchHost host, int cores); + + /** + * Lowers the perceived idle cores on a machine if the load is over certain threshold. + * + * @param host + * @param load + */ + void determineIdleCores(DispatchHost host, int load); + + /** + * Return a set of job IDs that can take the given host. + * + * @param host + * @param show + * @param numJobs + * @return + */ + Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); + + /** + * Return true of the job has pending frames. + * + * @param job + * @return + */ + boolean hasPendingFrames(JobInterface job); + + /** + * Return true if the layer has pending frames. 
+ * + * @param layer + * @return + */ + boolean hasPendingFrames(LayerInterface layer); + + /** + * Clear bookableShows cache + * + * @return + */ + void clearCache(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java index 74c3d212b..482cc1f3b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java @@ -62,617 +62,622 @@ @Transactional(propagation = Propagation.REQUIRED) public class DispatchSupportService implements DispatchSupport { - private static final Logger logger = LogManager.getLogger(DispatchSupportService.class); - - private JobDao jobDao; - private FrameDao frameDao; - private LayerDao layerDao; - private ProcDao procDao; - private HostDao hostDao; - private ShowDao showDao; - private DispatcherDao dispatcherDao; - private DependManager dependManager; - private SubscriptionDao subscriptionDao; - private RqdClient rqdClient; - private RedirectManager redirectManager; - private BookingManager bookingManager; - private BookingDao bookingDao; - - private ConcurrentHashMap strandedCores = - new ConcurrentHashMap(); - - @Override - public void pickupStrandedCores(DispatchHost host) { - logger.info(host + "picked up stranded cores"); - pickedUpCoresCount.getAndIncrement(); - strandedCores.remove(host.getHostId()); - } - - @Override - public boolean hasStrandedCores(HostInterface host) { - StrandedCores stranded = strandedCores.get(host.getHostId()); - if (stranded == null) { - return false; - } - if (stranded.isExpired()) { - return false; - } - - return true; - } - - @Override - public void strandCores(DispatchHost host, int cores) { - logger.info(host + " found " + cores + ", stranded cores"); - host.strandedCores = cores; - if (host.threadMode != ThreadMode.VARIABLE.getNumber()) { - host.threadMode = ThreadMode.ALL.getNumber(); - } - strandedCores.putIfAbsent(host.getHostId(), new StrandedCores(cores)); - strandedCoresCount.getAndIncrement(); - } - - @Transactional(readOnly = true) - public List findNextDispatchFrames(JobInterface job, VirtualProc proc, int limit) { - return dispatcherDao.findNextDispatchFrames(job, proc, limit); - } - - @Transactional(readOnly = true) - public List findNextDispatchFrames(JobInterface job, DispatchHost host, - int limit) { - return dispatcherDao.findNextDispatchFrames(job, host, limit); - } - - @Override - @Transactional(readOnly = true) - public List findNextDispatchFrames(LayerInterface layer, DispatchHost host, - int limit) { - return dispatcherDao.findNextDispatchFrames(layer, host, limit); - } - - @Override - @Transactional(readOnly = true) - public List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, - int limit) { - return dispatcherDao.findNextDispatchFrames(layer, proc, limit); - } - - @Transactional(readOnly = true) - public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { - return dispatcherDao.findUnderProcedJob(excludeJob, proc); - } - - @Transactional(readOnly = true) - public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { - return dispatcherDao.higherPriorityJobExists(baseJob, proc); - } - - @Transactional(readOnly = true) - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { - return dispatcherDao.findDispatchJobsForAllShows(host, numJobs); - } - - @Transactional(readOnly = true) - public Set 
findDispatchJobs(DispatchHost host, int numJobs) { - return dispatcherDao.findDispatchJobs(host, numJobs); - } - - @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { - return dispatcherDao.findDispatchJobs(host, g); - } - - @Override - @Transactional(readOnly = true) - public Set findLocalDispatchJobs(DispatchHost host) { - return dispatcherDao.findLocalDispatchJobs(host); - } - - @Override - @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { - return dispatcherDao.findDispatchJobs(host, show, numJobs); - } + private static final Logger logger = LogManager.getLogger(DispatchSupportService.class); + + private JobDao jobDao; + private FrameDao frameDao; + private LayerDao layerDao; + private ProcDao procDao; + private HostDao hostDao; + private ShowDao showDao; + private DispatcherDao dispatcherDao; + private DependManager dependManager; + private SubscriptionDao subscriptionDao; + private RqdClient rqdClient; + private RedirectManager redirectManager; + private BookingManager bookingManager; + private BookingDao bookingDao; + + private ConcurrentHashMap strandedCores = + new ConcurrentHashMap(); + + @Override + public void pickupStrandedCores(DispatchHost host) { + logger.info(host + "picked up stranded cores"); + pickedUpCoresCount.getAndIncrement(); + strandedCores.remove(host.getHostId()); + } + + @Override + public boolean hasStrandedCores(HostInterface host) { + StrandedCores stranded = strandedCores.get(host.getHostId()); + if (stranded == null) { + return false; + } + if (stranded.isExpired()) { + return false; + } + + return true; + } + + @Override + public void strandCores(DispatchHost host, int cores) { + logger.info(host + " found " + cores + ", stranded cores"); + host.strandedCores = cores; + if (host.threadMode != ThreadMode.VARIABLE.getNumber()) { + host.threadMode = ThreadMode.ALL.getNumber(); + } + strandedCores.putIfAbsent(host.getHostId(), new StrandedCores(cores)); + strandedCoresCount.getAndIncrement(); + } + + @Transactional(readOnly = true) + public List findNextDispatchFrames(JobInterface job, VirtualProc proc, + int limit) { + return dispatcherDao.findNextDispatchFrames(job, proc, limit); + } + + @Transactional(readOnly = true) + public List findNextDispatchFrames(JobInterface job, DispatchHost host, + int limit) { + return dispatcherDao.findNextDispatchFrames(job, host, limit); + } + + @Override + @Transactional(readOnly = true) + public List findNextDispatchFrames(LayerInterface layer, DispatchHost host, + int limit) { + return dispatcherDao.findNextDispatchFrames(layer, host, limit); + } + + @Override + @Transactional(readOnly = true) + public List findNextDispatchFrames(LayerInterface layer, VirtualProc proc, + int limit) { + return dispatcherDao.findNextDispatchFrames(layer, proc, limit); + } + + @Transactional(readOnly = true) + public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { + return dispatcherDao.findUnderProcedJob(excludeJob, proc); + } + + @Transactional(readOnly = true) + public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { + return dispatcherDao.higherPriorityJobExists(baseJob, proc); + } + + @Transactional(readOnly = true) + public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { + return dispatcherDao.findDispatchJobsForAllShows(host, numJobs); + } + + @Transactional(readOnly = true) + public Set findDispatchJobs(DispatchHost host, int numJobs) { + return 
dispatcherDao.findDispatchJobs(host, numJobs); + } + + @Transactional(readOnly = true) + public Set findDispatchJobs(DispatchHost host, GroupInterface g) { + return dispatcherDao.findDispatchJobs(host, g); + } + + @Override + @Transactional(readOnly = true) + public Set findLocalDispatchJobs(DispatchHost host) { + return dispatcherDao.findLocalDispatchJobs(host); + } + + @Override + @Transactional(readOnly = true) + public Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { + return dispatcherDao.findDispatchJobs(host, show, numJobs); + } + + @Transactional(propagation = Propagation.REQUIRED) + public boolean increaseReservedMemory(ProcInterface p, long value) { + return procDao.increaseReservedMemory(p, value); + } + + @Override + public boolean clearVirtualProcAssignement(ProcInterface proc) { + try { + return procDao.clearVirtualProcAssignment(proc); + } catch (DataAccessException e) { + return false; + } + } - @Transactional(propagation = Propagation.REQUIRED) - public boolean increaseReservedMemory(ProcInterface p, long value) { - return procDao.increaseReservedMemory(p, value); - } + @Transactional(propagation = Propagation.REQUIRED) + public boolean balanceReservedMemory(ProcInterface targetProc, long targetMem) { + boolean result = procDao.balanceUnderUtilizedProcs(targetProc, targetMem); + if (result) { + DispatchSupport.balanceSuccess.incrementAndGet(); + } else { + DispatchSupport.balanceFailed.incrementAndGet(); + } + return result; + } - @Override - public boolean clearVirtualProcAssignement(ProcInterface proc) { - try { - return procDao.clearVirtualProcAssignment(proc); - } catch (DataAccessException e) { - return false; + @Transactional(propagation = Propagation.NEVER) + public void runFrame(VirtualProc proc, DispatchFrame frame) { + try { + rqdClient.launchFrame(prepareRqdRunFrame(proc, frame), proc); + dispatchedProcs.getAndIncrement(); + } catch (Exception e) { + throw new DispatcherException( + proc.getName() + " could not be booked on " + frame.getName() + ", " + e); + } } - } - @Transactional(propagation = Propagation.REQUIRED) - public boolean balanceReservedMemory(ProcInterface targetProc, long targetMem) { - boolean result = procDao.balanceUnderUtilizedProcs(targetProc, targetMem); - if (result) { - DispatchSupport.balanceSuccess.incrementAndGet(); - } else { - DispatchSupport.balanceFailed.incrementAndGet(); + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void startFrameAndProc(VirtualProc proc, DispatchFrame frame) { + logger.trace("starting frame: " + frame); + + frameDao.updateFrameStarted(proc, frame); + + reserveProc(proc, frame); } - return result; - } - @Transactional(propagation = Propagation.NEVER) - public void runFrame(VirtualProc proc, DispatchFrame frame) { - try { - rqdClient.launchFrame(prepareRqdRunFrame(proc, frame), proc); - dispatchedProcs.getAndIncrement(); - } catch (Exception e) { - throw new DispatcherException( - proc.getName() + " could not be booked on " + frame.getName() + ", " + e); + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isCueBookable(FacilityInterface f) { + return jobDao.cueHasPendingJobs(f); } - } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void startFrameAndProc(VirtualProc proc, DispatchFrame frame) { - logger.trace("starting frame: " + frame); + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobDispatchable(JobInterface job, boolean local) { + + if 
(!jobDao.hasPendingFrames(job)) { + return false; + } + + if (!local && jobDao.isOverMaxCores(job)) { + return false; + } - frameDao.updateFrameStarted(proc, frame); + return true; + } - reserveProc(proc, frame); - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobBookable(JobInterface job) { - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isCueBookable(FacilityInterface f) { - return jobDao.cueHasPendingJobs(f); - } + if (!jobDao.hasPendingFrames(job)) { + return false; + } - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isJobDispatchable(JobInterface job, boolean local) { + if (jobDao.isAtMaxCores(job)) { + return false; + } - if (!jobDao.hasPendingFrames(job)) { - return false; + return true; } - if (!local && jobDao.isOverMaxCores(job)) { - return false; + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits) { + + if (!jobDao.hasPendingFrames(job)) { + return false; + } + + if (jobDao.isOverMaxCores(job, coreUnits)) { + return false; + } + + if (jobDao.isOverMaxGpus(job, gpuUnits)) { + return false; + } + + return true; } - return true; - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean hasPendingFrames(JobInterface job) { - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isJobBookable(JobInterface job) { + if (!jobDao.hasPendingFrames(job)) { + return false; + } - if (!jobDao.hasPendingFrames(job)) { - return false; + return true; } - if (jobDao.isAtMaxCores(job)) { - return false; + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean hasPendingFrames(LayerInterface layer) { + return layerDao.isLayerDispatchable(layer); } - return true; - } + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowOverBurst(VirtualProc proc) { + return subscriptionDao.isShowOverBurst((ShowInterface) proc, (AllocationInterface) proc, 0); + } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits) { + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { + return subscriptionDao.isShowOverBurst(show, alloc, coreUnits); + } - if (!jobDao.hasPendingFrames(job)) { - return false; + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { + return subscriptionDao.isShowAtOrOverBurst(show, alloc); } - if (jobDao.isOverMaxCores(job, coreUnits)) { - return false; + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isShowOverSize(VirtualProc proc) { + return subscriptionDao.isShowOverSize(proc); } - if (jobDao.isOverMaxGpus(job, gpuUnits)) { - return false; + @Override + @Transactional(propagation = Propagation.REQUIRED) + public boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus) { + logger.trace("stopping frame " + frame); + if (frameDao.updateFrameStopped(frame, state, exitStatus)) { + procDao.clearVirtualProcAssignment(frame); + return true; + } + + return false; } - return true; - } + @Override + 
@Transactional(propagation = Propagation.REQUIRED) + public boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus, long maxRss) { + logger.trace("stopping frame: " + frame); + if (frameDao.updateFrameStopped(frame, state, exitStatus, maxRss)) { + // Update max rss up the chain. + layerDao.updateLayerMaxRSS(frame, maxRss, false); + jobDao.updateMaxRSS(frame, maxRss); + + procDao.clearVirtualProcAssignment(frame); + return true; + } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean hasPendingFrames(JobInterface job) { + return false; + } - if (!jobDao.hasPendingFrames(job)) { - return false; + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void clearFrame(DispatchFrame frame) { + logger.trace("clearing frame: " + frame); + frameDao.updateFrameCleared(frame); } - return true; - } + @Override + @Transactional(propagation = Propagation.REQUIRED) + public boolean updateFrameMemoryError(FrameInterface frame) { + return frameDao.updateFrameMemoryError(frame); + } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean hasPendingFrames(LayerInterface layer) { - return layerDao.isLayerDispatchable(layer); - } + @Transactional(propagation = Propagation.SUPPORTS) + public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { + int threads = proc.coresReserved / 100; + if (threads < 1) { + threads = 1; + } - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isShowOverBurst(VirtualProc proc) { - return subscriptionDao.isShowOverBurst((ShowInterface) proc, (AllocationInterface) proc, 0); - } + int frameNumber = Integer.valueOf(frame.name.substring(0, frame.name.indexOf("-"))); + String zFrameNumber = String.format("%04d", frameNumber); + + FrameSet fs = new FrameSet(frame.range); + int startFrameIndex = fs.index(frameNumber); + String frameSpec = fs.getChunk(startFrameIndex, frame.chunkSize); + + FrameSet chunkFrameSet = new FrameSet(frameSpec); + int chunkEndFrame = chunkFrameSet.get(chunkFrameSet.size() - 1); + + RunFrame.Builder builder = RunFrame.newBuilder().setShot(frame.shot).setShow(frame.show) + .setUserName(frame.owner).setLogDir(frame.logDir).setJobId(frame.jobId) + .setJobName(frame.jobName).setFrameId(frame.id).setFrameName(frame.name) + .setLayerId(frame.getLayerId()).setResourceId(proc.getProcId()) + .setNumCores(proc.coresReserved).setNumGpus(proc.gpusReserved) + .setStartTime(System.currentTimeMillis()).setIgnoreNimby(proc.isLocalDispatch) + .setOs(proc.os).setSoftMemoryLimit(frame.softMemoryLimit) + .setHardMemoryLimit(frame.hardMemoryLimit) + .putAllEnvironment(jobDao.getEnvironment(frame)) + .putAllEnvironment(layerDao.getLayerEnvironment(frame)).putEnvironment("CUE3", "1") + .putEnvironment("CUE_THREADS", String.valueOf(threads)) + .putEnvironment("CUE_MEMORY", String.valueOf(proc.memoryReserved)) + .putEnvironment("CUE_GPUS", String.valueOf(proc.gpusReserved)) + .putEnvironment("CUE_GPU_MEMORY", String.valueOf(proc.gpuMemoryReserved)) + .putEnvironment("CUE_LOG_PATH", frame.logDir) + .putEnvironment("CUE_RANGE", frame.range) + .putEnvironment("CUE_CHUNK", String.valueOf(frame.chunkSize)) + .putEnvironment("CUE_IFRAME", String.valueOf(frameNumber)) + .putEnvironment("CUE_LAYER", frame.layerName) + .putEnvironment("CUE_JOB", frame.jobName).putEnvironment("CUE_FRAME", frame.name) + .putEnvironment("CUE_SHOW", frame.show).putEnvironment("CUE_SHOT", frame.shot) + .putEnvironment("CUE_USER", 
frame.owner).putEnvironment("CUE_JOB_ID", frame.jobId) + .putEnvironment("CUE_LAYER_ID", frame.layerId) + .putEnvironment("CUE_FRAME_ID", frame.id) + .putEnvironment("CUE_THREADABLE", frame.threadable ? "1" : "0") + .setCommand(frame.command.replaceAll("#ZFRAME#", zFrameNumber) + .replaceAll("#IFRAME#", String.valueOf(frameNumber)) + .replaceAll("#FRAME_START#", String.valueOf(frameNumber)) + .replaceAll("#FRAME_END#", String.valueOf(chunkEndFrame)) + .replaceAll("#FRAME_CHUNK#", String.valueOf(frame.chunkSize)) + .replaceAll("#LAYER#", frame.layerName).replaceAll("#JOB#", frame.jobName) + .replaceAll("#FRAMESPEC#", frameSpec).replaceAll("#FRAME#", frame.name)); + /* + * The special command tokens above (#ZFRAME# and others) are provided to the user in + * cuesubmit. see: cuesubmit/cuesubmit/Constants.py Update the Constant.py file when + * updating tokens here, they will appear in the cuesubmit tooltip popup. + */ + + frame.uid.ifPresent(builder::setUid); + + return builder.build(); + } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { - return subscriptionDao.isShowOverBurst(show, alloc, coreUnits); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { - return subscriptionDao.isShowAtOrOverBurst(show, alloc); - } + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void fixFrame(DispatchFrame frame) { + long numFixed = DispatchSupport.fixedFrames.incrementAndGet(); - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isShowOverSize(VirtualProc proc) { - return subscriptionDao.isShowOverSize(proc); - } + logger.trace("fixing frame #: " + numFixed + " ," + frame); - @Override - @Transactional(propagation = Propagation.REQUIRED) - public boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus) { - logger.trace("stopping frame " + frame); - if (frameDao.updateFrameStopped(frame, state, exitStatus)) { - procDao.clearVirtualProcAssignment(frame); - return true; - } - - return false; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public boolean stopFrame(FrameInterface frame, FrameState state, int exitStatus, long maxRss) { - logger.trace("stopping frame: " + frame); - if (frameDao.updateFrameStopped(frame, state, exitStatus, maxRss)) { - // Update max rss up the chain. 
- layerDao.updateLayerMaxRSS(frame, maxRss, false); - jobDao.updateMaxRSS(frame, maxRss); - - procDao.clearVirtualProcAssignment(frame); - return true; - } - - return false; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void clearFrame(DispatchFrame frame) { - logger.trace("clearing frame: " + frame); - frameDao.updateFrameCleared(frame); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public boolean updateFrameMemoryError(FrameInterface frame) { - return frameDao.updateFrameMemoryError(frame); - } - - @Transactional(propagation = Propagation.SUPPORTS) - public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { - int threads = proc.coresReserved / 100; - if (threads < 1) { - threads = 1; - } - - int frameNumber = Integer.valueOf(frame.name.substring(0, frame.name.indexOf("-"))); - String zFrameNumber = String.format("%04d", frameNumber); - - FrameSet fs = new FrameSet(frame.range); - int startFrameIndex = fs.index(frameNumber); - String frameSpec = fs.getChunk(startFrameIndex, frame.chunkSize); - - FrameSet chunkFrameSet = new FrameSet(frameSpec); - int chunkEndFrame = chunkFrameSet.get(chunkFrameSet.size() - 1); - - RunFrame.Builder builder = RunFrame.newBuilder().setShot(frame.shot).setShow(frame.show) - .setUserName(frame.owner).setLogDir(frame.logDir).setJobId(frame.jobId) - .setJobName(frame.jobName).setFrameId(frame.id).setFrameName(frame.name) - .setLayerId(frame.getLayerId()).setResourceId(proc.getProcId()) - .setNumCores(proc.coresReserved).setNumGpus(proc.gpusReserved) - .setStartTime(System.currentTimeMillis()).setIgnoreNimby(proc.isLocalDispatch) - .setOs(proc.os).setSoftMemoryLimit(frame.softMemoryLimit) - .setHardMemoryLimit(frame.hardMemoryLimit).putAllEnvironment(jobDao.getEnvironment(frame)) - .putAllEnvironment(layerDao.getLayerEnvironment(frame)).putEnvironment("CUE3", "1") - .putEnvironment("CUE_THREADS", String.valueOf(threads)) - .putEnvironment("CUE_MEMORY", String.valueOf(proc.memoryReserved)) - .putEnvironment("CUE_GPUS", String.valueOf(proc.gpusReserved)) - .putEnvironment("CUE_GPU_MEMORY", String.valueOf(proc.gpuMemoryReserved)) - .putEnvironment("CUE_LOG_PATH", frame.logDir).putEnvironment("CUE_RANGE", frame.range) - .putEnvironment("CUE_CHUNK", String.valueOf(frame.chunkSize)) - .putEnvironment("CUE_IFRAME", String.valueOf(frameNumber)) - .putEnvironment("CUE_LAYER", frame.layerName).putEnvironment("CUE_JOB", frame.jobName) - .putEnvironment("CUE_FRAME", frame.name).putEnvironment("CUE_SHOW", frame.show) - .putEnvironment("CUE_SHOT", frame.shot).putEnvironment("CUE_USER", frame.owner) - .putEnvironment("CUE_JOB_ID", frame.jobId).putEnvironment("CUE_LAYER_ID", frame.layerId) - .putEnvironment("CUE_FRAME_ID", frame.id) - .putEnvironment("CUE_THREADABLE", frame.threadable ? "1" : "0") - .setCommand(frame.command.replaceAll("#ZFRAME#", zFrameNumber) - .replaceAll("#IFRAME#", String.valueOf(frameNumber)) - .replaceAll("#FRAME_START#", String.valueOf(frameNumber)) - .replaceAll("#FRAME_END#", String.valueOf(chunkEndFrame)) - .replaceAll("#FRAME_CHUNK#", String.valueOf(frame.chunkSize)) - .replaceAll("#LAYER#", frame.layerName).replaceAll("#JOB#", frame.jobName) - .replaceAll("#FRAMESPEC#", frameSpec).replaceAll("#FRAME#", frame.name)); - /* - * The special command tokens above (#ZFRAME# and others) are provided to the user in cuesubmit. - * see: cuesubmit/cuesubmit/Constants.py Update the Constant.py file when updating tokens here, - * they will appear in the cuesubmit tooltip popup. 
- */ - - frame.uid.ifPresent(builder::setUid); - - return builder.build(); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void fixFrame(DispatchFrame frame) { - long numFixed = DispatchSupport.fixedFrames.incrementAndGet(); - - logger.trace("fixing frame #: " + numFixed + " ," + frame); - - VirtualProc proc = null; - try { - proc = procDao.findVirtualProc(frame); - } catch (Exception e) { - // Can't even find the damn proc, which i'm - logger.info("attempted to fix a frame but the proc " + "wasn't found!"); - return; - } - - if (frameDao.updateFrameFixed(proc, frame)) { - logger.info("the frame " + frame.getId() + " was fixed."); - } - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void updateUsageCounters(FrameInterface frame, int exitStatus) { - try { - ResourceUsage usage = frameDao.getResourceUsage(frame); - showDao.updateFrameCounters(frame, exitStatus); - jobDao.updateUsage(frame, usage, exitStatus); - layerDao.updateUsage(frame, usage, exitStatus); - } catch (Exception e) { - logger.info("Unable to find and update resource usage for " + "frame, " + frame - + " while updating frame with " + "exit status " + exitStatus + "," + e); - } - } - - private void reserveProc(VirtualProc proc, DispatchFrame frame) { - - proc.jobId = frame.getJobId(); - proc.frameId = frame.getFrameId(); - proc.layerId = frame.getLayerId(); - proc.showId = frame.getShowId(); - - if (proc.isNew()) { - logger.info("creating proc " + proc.getName() + " for " + frame.getName()); - procDao.insertVirtualProc(proc); - } else { - logger.info("updated proc " + proc.getName() + " for " + frame.getName()); - procDao.updateVirtualProcAssignment(proc); - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public void unbookProc(VirtualProc proc) { - unbookProc(proc, "was unbooked"); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void unbookProc(VirtualProc proc, String reason) { - if (proc == null) { - return; - } - if (proc.isNew()) { - return; - } - proc.unbooked = true; - procDao.deleteVirtualProc(proc); - DispatchSupport.unbookedProcs.getAndIncrement(); - logger.info(proc + " " + reason); - - /* - * Remove the local dispatch record if it has gone inactive. - */ - if (proc.isLocalDispatch) { - try { - bookingManager.removeInactiveLocalHostAssignment( - bookingDao.getLocalJobAssignment(proc.getHostId(), proc.getJobId())); - } catch (EmptyResultDataAccessException e) { - // Eat the exception. - } - } - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void lostProc(VirtualProc proc, String reason, int exitStatus) { - long numCleared = clearedProcs.incrementAndGet(); - - unbookProc(proc, "proc " + proc.getName() + " is #" + numCleared + " cleared: " + reason); - - if (proc.frameId != null) { - FrameInterface f = frameDao.getFrame(proc.frameId); - /* - * Set the checkpoint state to disabled before stopping the the frame because it will go to - * the checkpoint state. This is not desirable when we're clearing off processes that were - * lost due to a machine crash. - */ - frameDao.updateFrameCheckpointState(f, CheckpointState.DISABLED); - /* - * If the proc has a frame, stop the frame. Frames can only be stopped that are running. 
- */ - if (frameDao.updateFrameStopped(f, FrameState.WAITING, exitStatus)) { - updateUsageCounters(proc, exitStatus); - } - /* - * If the frame is not running, check if frame is in dead state, frames that died due to host - * going down should be put back into WAITING status. - */ - else { - FrameDetail frameDetail = frameDao.getFrameDetail(f); - if ((frameDetail.state == FrameState.DEAD) - && (Dispatcher.EXIT_STATUS_DOWN_HOST == exitStatus)) { - if (frameDao.updateFrameHostDown(f)) { - logger.info("update frame " + f.getFrameId() + "to WAITING status for down host"); - } + VirtualProc proc = null; + try { + proc = procDao.findVirtualProc(frame); + } catch (Exception e) { + // Can't even find the damn proc, which i'm + logger.info("attempted to fix a frame but the proc " + "wasn't found!"); + return; } - } - } else { - logger.info("Frame ID is NULL, not updating Frame state"); - } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, - long maxVsize, long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, - byte[] children) { - procDao.updateProcMemoryUsage(frame, rss, maxRss, vsize, maxVsize, usedGpuMemory, - maxUsedGpuMemory, usedSwapMemory, children); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, - long lluTime) { - - try { - frameDao.updateFrameMemoryUsageAndLluTime(frame, maxRss, rss, lluTime); - } catch (FrameReservationException ex) { - // Eat this, the frame was not in the correct state or - // was locked by another thread. The only reason it would - // be locked by another thread would be if the state is - // changing. 
- logger.warn("failed to update memory usage and LLU time for frame: " + frame); - } - } - - @Override - public void determineIdleCores(DispatchHost host, int load) { - int maxLoad = host.cores + ((host.cores / 100) * Dispatcher.CORE_LOAD_THRESHOLD); - - int idleCores = maxLoad - load; - if (idleCores < host.idleCores) { - host.idleCores = idleCores; - } - } - - public DispatcherDao getDispatcherDao() { - return dispatcherDao; - } - - public void setDispatcherDao(DispatcherDao dispatcherDao) { - this.dispatcherDao = dispatcherDao; - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } - - public SubscriptionDao getSubscriptionDao() { - return subscriptionDao; - } - - public void setSubscriptionDao(SubscriptionDao subscriptionDao) { - this.subscriptionDao = subscriptionDao; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public ShowDao getShowDao() { - return showDao; - } - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } + if (frameDao.updateFrameFixed(proc, frame)) { + logger.info("the frame " + frame.getId() + " was fixed."); + } + } - public BookingManager getBookingManager() { - return bookingManager; - } + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void updateUsageCounters(FrameInterface frame, int exitStatus) { + try { + ResourceUsage usage = frameDao.getResourceUsage(frame); + showDao.updateFrameCounters(frame, exitStatus); + jobDao.updateUsage(frame, usage, exitStatus); + layerDao.updateUsage(frame, usage, exitStatus); + } catch (Exception e) { + logger.info("Unable to find and update resource usage for " + "frame, " + frame + + " while updating frame with " + "exit status " + exitStatus + "," + e); + } + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } + private void reserveProc(VirtualProc proc, DispatchFrame frame) { - public BookingDao getBookingDao() { - return bookingDao; - } + proc.jobId = frame.getJobId(); + proc.frameId = frame.getFrameId(); + proc.layerId = frame.getLayerId(); + proc.showId = frame.getShowId(); - public void setBookingDao(BookingDao bookingDao) { - this.bookingDao = bookingDao; - } + if (proc.isNew()) { + logger.info("creating proc " + proc.getName() + " for " + frame.getName()); + procDao.insertVirtualProc(proc); + } else { + logger.info("updated proc " + proc.getName() + " for " + frame.getName()); + procDao.updateVirtualProcAssignment(proc); + } + } - 
@Override - public void clearCache() { - dispatcherDao.clearCache(); - } + @Transactional(propagation = Propagation.REQUIRED) + public void unbookProc(VirtualProc proc) { + unbookProc(proc, "was unbooked"); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void unbookProc(VirtualProc proc, String reason) { + if (proc == null) { + return; + } + if (proc.isNew()) { + return; + } + proc.unbooked = true; + procDao.deleteVirtualProc(proc); + DispatchSupport.unbookedProcs.getAndIncrement(); + logger.info(proc + " " + reason); + + /* + * Remove the local dispatch record if it has gone inactive. + */ + if (proc.isLocalDispatch) { + try { + bookingManager.removeInactiveLocalHostAssignment( + bookingDao.getLocalJobAssignment(proc.getHostId(), proc.getJobId())); + } catch (EmptyResultDataAccessException e) { + // Eat the exception. + } + } + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void lostProc(VirtualProc proc, String reason, int exitStatus) { + long numCleared = clearedProcs.incrementAndGet(); + + unbookProc(proc, "proc " + proc.getName() + " is #" + numCleared + " cleared: " + reason); + + if (proc.frameId != null) { + FrameInterface f = frameDao.getFrame(proc.frameId); + /* + * Set the checkpoint state to disabled before stopping the the frame because it will go + * to the checkpoint state. This is not desirable when we're clearing off processes that + * were lost due to a machine crash. + */ + frameDao.updateFrameCheckpointState(f, CheckpointState.DISABLED); + /* + * If the proc has a frame, stop the frame. Frames can only be stopped that are running. + */ + if (frameDao.updateFrameStopped(f, FrameState.WAITING, exitStatus)) { + updateUsageCounters(proc, exitStatus); + } + /* + * If the frame is not running, check if frame is in dead state, frames that died due to + * host going down should be put back into WAITING status. + */ + else { + FrameDetail frameDetail = frameDao.getFrameDetail(f); + if ((frameDetail.state == FrameState.DEAD) + && (Dispatcher.EXIT_STATUS_DOWN_HOST == exitStatus)) { + if (frameDao.updateFrameHostDown(f)) { + logger.info("update frame " + f.getFrameId() + + "to WAITING status for down host"); + } + } + } + } else { + logger.info("Frame ID is NULL, not updating Frame state"); + } + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, + long maxVsize, long usedGpuMemory, long maxUsedGpuMemory, long usedSwapMemory, + byte[] children) { + procDao.updateProcMemoryUsage(frame, rss, maxRss, vsize, maxVsize, usedGpuMemory, + maxUsedGpuMemory, usedSwapMemory, children); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, + long lluTime) { + + try { + frameDao.updateFrameMemoryUsageAndLluTime(frame, maxRss, rss, lluTime); + } catch (FrameReservationException ex) { + // Eat this, the frame was not in the correct state or + // was locked by another thread. The only reason it would + // be locked by another thread would be if the state is + // changing. 
+ logger.warn("failed to update memory usage and LLU time for frame: " + frame); + } + } + + @Override + public void determineIdleCores(DispatchHost host, int load) { + int maxLoad = host.cores + ((host.cores / 100) * Dispatcher.CORE_LOAD_THRESHOLD); + + int idleCores = maxLoad - load; + if (idleCores < host.idleCores) { + host.idleCores = idleCores; + } + } + + public DispatcherDao getDispatcherDao() { + return dispatcherDao; + } + + public void setDispatcherDao(DispatcherDao dispatcherDao) { + this.dispatcherDao = dispatcherDao; + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public SubscriptionDao getSubscriptionDao() { + return subscriptionDao; + } + + public void setSubscriptionDao(SubscriptionDao subscriptionDao) { + this.subscriptionDao = subscriptionDao; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public BookingManager getBookingManager() { + return bookingManager; + } + + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } + + public BookingDao getBookingDao() { + return bookingDao; + } + + public void setBookingDao(BookingDao bookingDao) { + this.bookingDao = bookingDao; + } + + @Override + public void clearCache() { + dispatcherDao.clearCache(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java index 4b9dc76c5..ae29f43df 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java @@ -29,188 +29,188 @@ public interface Dispatcher { - // Maximum number of core points that can be assigned to a frame - public static final int CORE_POINTS_RESERVED_MAX = 2400; + // Maximum number of core points that can be assigned to a frame + public static final int CORE_POINTS_RESERVED_MAX = 2400; - // The default number of core points assigned to a frame, if no core - // point value is specified - public static final int CORE_POINTS_RESERVED_DEFAULT = 100; + // The default number of core points assigned to a frame, if no core + // point value is specified + public static final int CORE_POINTS_RESERVED_DEFAULT = 100; - // The minimum amount of core points you can assign to a frame. 
- public static final int CORE_POINTS_RESERVED_MIN = 10; - - // The minimum amount of gpu points you can assign to a frame. - public static final int GPU_UNITS_RESERVED_DEFAULT = 0; - public static final int GPU_UNITS_RESERVED_MIN = 0; - - // Amount of load per core a host can have before the perceived - // number of idle cores is modified to reflect load conditions - // on the host. - public static final int CORE_LOAD_THRESHOLD = 5; - - // Amount of memory that has to be idle for the rest of the cores - // on the machine to be considered stranded. - public static final long MEM_STRANDED_THRESHHOLD = CueUtil.GB + CueUtil.MB512; - - // Determines the service default minimum memory per frame. - public static final long MEM_SERVICE_RESERVED_DEFAULT = CueUtil.GB4; - - // Determines the service default minimum gpu per frame. - public static final long MEM_SERVICE_GPU_RESERVED_DEFAULT = 0; - - // Return value for cleared frame - public static final int EXIT_STATUS_FRAME_CLEARED = 299; - - /* - * An orphan proc occurs when a proc is left with no frame assignment. - */ - public static final int EXIT_STATUS_FRAME_ORPHAN = 301; - - /* - * A failed kill occurs when a user tries to kill a frame and RQD throws an exception. - */ - public static final int EXIT_STATUS_FAILED_KILL = 302; - - // Return value for cleared frame - public static final int EXIT_STATUS_DOWN_HOST = 399; - - // Upgrade the memory on the layer by 1g and retry. - public static final int EXIT_STATUS_MEMORY_FAILURE = 33; - - // Upgrade the memory on the layer by 1g and retry. - public static final int DOCKER_EXIT_STATUS_MEMORY_FAILURE = 137; - - // max retry time - public static final int FRAME_TIME_NO_RETRY = 3600 * 8; - - // The maximum amount of virtual memory a frame can be using - // without being penalized for it. - public static final long VIRTUAL_MEM_THRESHHOLD = CueUtil.GB2; - - // How long to keep track of a frame kill request - public static final int FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES = 3; - - // A higher number gets more deep booking but less spread on the cue. - public static final int DEFAULT_MAX_FRAMES_PER_PASS = 4; - - // Disable RQD communication. - public static boolean testMode = false; - - // The time in seconds it takes for a proc or frame to orphan. - public static final int ORPHANED_SECONDS = 300; - - // The chance a frame will unbook itself to run a higher priority frame. - // 0 will never unbook, > 100 will always unbook. - public static final int UNBOOK_FREQUENCY = 101; - - // The default operating system assigned to host that don't report one. - public static final String OS_DEFAULT = "rhel40"; - - // The default minimum memory increase for when jobs fail due to not enough - // memory - public static final long MINIMUM_MEMORY_INCREASE = CueUtil.GB2; - - public static final double SOFT_MEMORY_MULTIPLIER = 1.1; - public static final double HARD_MEMORY_MULTIPLIER = 1.4; - - /** - * Dispatch a host to the facility. - * - * @param host - * @return A list of procs that were dispatched. - */ - List dispatchHostToAllShows(DispatchHost host); - - /** - * Dispatch a host to the facility. - * - * @param host - * @return A list of procs that were dispatched. - */ - List dispatchHost(DispatchHost host); - - /** - * Dispatch a host to the specified group and specify the maximum number of frames to dispatch - * from the host. - * - * @param host - * @param g - * @param numFrames - * @return - */ - List dispatchHost(DispatchHost host, GroupInterface g); - - /** - * Dispatch a host to the specified job. 
- * - * @param host - * @param job - * @return A list of procs that were dispatched. - * @throws DispatcherException if an error occurs. - */ - List dispatchHost(DispatchHost host, JobInterface job); - - /** - * Dispatch a host to the specified job. - * - * @param host - * @param job - * @return A list of procs that were dispatched. - * @throws DispatcherException if an error occurs. - */ - List dispatchHost(DispatchHost host, LayerInterface layer); - - /** - * Dispatch a host to the specified job. - * - * @param host - * @param job - * @return A list of procs that were dispatched. - * @throws DispatcherException if an error occurs. - */ - List dispatchHost(DispatchHost host, FrameInterface frame); - - /** - * Dispatch a proc to the specified job. - * - * @param proc - * @param job - * @throws DispatcherException if an error occurs. - */ - void dispatchProcToJob(VirtualProc proc, JobInterface job); - - /** - * Return true if the dispatcher should not talk to RQD - * - * @return - */ - boolean isTestMode(); - - /** - * Return true if the dispatcher should not talk to RQD - * - * @return - */ - void setTestMode(boolean enabled); - - /** - * Handles assigning a processor to a specified frame. - * - * @param frame - * @param proc - * - * @throws FrameReservationException if the frame cannot be reserved. - * @throws ResourceReservationFailureException if resources cannot be reserved. - * @throws RqdClientException if communication with RQD fails. - */ - void dispatch(DispatchFrame frame, VirtualProc proc); - - /** - * Dispatch the given host to the specified show. - * - * @param host - * @param show - * @return - */ - List dispatchHost(DispatchHost host, ShowInterface show); + // The minimum amount of core points you can assign to a frame. + public static final int CORE_POINTS_RESERVED_MIN = 10; + + // The minimum amount of gpu points you can assign to a frame. + public static final int GPU_UNITS_RESERVED_DEFAULT = 0; + public static final int GPU_UNITS_RESERVED_MIN = 0; + + // Amount of load per core a host can have before the perceived + // number of idle cores is modified to reflect load conditions + // on the host. + public static final int CORE_LOAD_THRESHOLD = 5; + + // Amount of memory that has to be idle for the rest of the cores + // on the machine to be considered stranded. + public static final long MEM_STRANDED_THRESHHOLD = CueUtil.GB + CueUtil.MB512; + + // Determines the service default minimum memory per frame. + public static final long MEM_SERVICE_RESERVED_DEFAULT = CueUtil.GB4; + + // Determines the service default minimum gpu per frame. + public static final long MEM_SERVICE_GPU_RESERVED_DEFAULT = 0; + + // Return value for cleared frame + public static final int EXIT_STATUS_FRAME_CLEARED = 299; + + /* + * An orphan proc occurs when a proc is left with no frame assignment. + */ + public static final int EXIT_STATUS_FRAME_ORPHAN = 301; + + /* + * A failed kill occurs when a user tries to kill a frame and RQD throws an exception. + */ + public static final int EXIT_STATUS_FAILED_KILL = 302; + + // Return value for cleared frame + public static final int EXIT_STATUS_DOWN_HOST = 399; + + // Upgrade the memory on the layer by 1g and retry. + public static final int EXIT_STATUS_MEMORY_FAILURE = 33; + + // Upgrade the memory on the layer by 1g and retry. 
+ public static final int DOCKER_EXIT_STATUS_MEMORY_FAILURE = 137;
+
+    // max retry time
+    public static final int FRAME_TIME_NO_RETRY = 3600 * 8;
+
+    // The maximum amount of virtual memory a frame can be using
+    // without being penalized for it.
+    public static final long VIRTUAL_MEM_THRESHHOLD = CueUtil.GB2;
+
+    // How long to keep track of a frame kill request
+    public static final int FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES = 3;
+
+    // A higher number gets deeper booking but less spread on the cue.
+    public static final int DEFAULT_MAX_FRAMES_PER_PASS = 4;
+
+    // Disable RQD communication.
+    public static boolean testMode = false;
+
+    // The time in seconds it takes for a proc or frame to orphan.
+    public static final int ORPHANED_SECONDS = 300;
+
+    // The chance a frame will unbook itself to run a higher priority frame.
+    // 0 will never unbook, > 100 will always unbook.
+    public static final int UNBOOK_FREQUENCY = 101;
+
+    // The default operating system assigned to hosts that don't report one.
+    public static final String OS_DEFAULT = "rhel40";
+
+    // The default minimum memory increase for when jobs fail due to not enough
+    // memory
+    public static final long MINIMUM_MEMORY_INCREASE = CueUtil.GB2;
+
+    public static final double SOFT_MEMORY_MULTIPLIER = 1.1;
+    public static final double HARD_MEMORY_MULTIPLIER = 1.4;
+
+    /**
+     * Dispatch a host to the facility.
+     *
+     * @param host
+     * @return A list of procs that were dispatched.
+     */
+    List dispatchHostToAllShows(DispatchHost host);
+
+    /**
+     * Dispatch a host to the facility.
+     *
+     * @param host
+     * @return A list of procs that were dispatched.
+     */
+    List dispatchHost(DispatchHost host);
+
+    /**
+     * Dispatch a host to the specified group.
+     *
+     * @param host
+     * @param g
+     * @return
+     */
+    List dispatchHost(DispatchHost host, GroupInterface g);
+
+    /**
+     * Dispatch a host to the specified job.
+     *
+     * @param host
+     * @param job
+     * @return A list of procs that were dispatched.
+     * @throws DispatcherException if an error occurs.
+     */
+    List dispatchHost(DispatchHost host, JobInterface job);
+
+    /**
+     * Dispatch a host to the specified layer.
+     *
+     * @param host
+     * @param layer
+     * @return A list of procs that were dispatched.
+     * @throws DispatcherException if an error occurs.
+     */
+    List dispatchHost(DispatchHost host, LayerInterface layer);
+
+    /**
+     * Dispatch a host to the specified frame.
+     *
+     * @param host
+     * @param frame
+     * @return A list of procs that were dispatched.
+     * @throws DispatcherException if an error occurs.
+     */
+    List dispatchHost(DispatchHost host, FrameInterface frame);
+
+    /**
+     * Dispatch a proc to the specified job.
+     *
+     * @param proc
+     * @param job
+     * @throws DispatcherException if an error occurs.
+     */
+    void dispatchProcToJob(VirtualProc proc, JobInterface job);
+
+    /**
+     * Return true if the dispatcher should not talk to RQD.
+     *
+     * @return
+     */
+    boolean isTestMode();
+
+    /**
+     * Set test mode; when enabled, the dispatcher does not talk to RQD.
+     *
+     * @param enabled
+     */
+    void setTestMode(boolean enabled);
+
+    /**
+     * Handles assigning a processor to a specified frame.
+     *
+     * @param frame
+     * @param proc
+     *
+     * @throws FrameReservationException if the frame cannot be reserved.
+     * @throws ResourceReservationFailureException if resources cannot be reserved.
+     * @throws RqdClientException if communication with RQD fails.
+ */ + void dispatch(DispatchFrame frame, VirtualProc proc); + + /** + * Dispatch the given host to the specified show. + * + * @param host + * @param show + * @return + */ + List dispatchHost(DispatchHost host, ShowInterface show); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java index 752b05f63..13403c58e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatcherException.java @@ -20,14 +20,14 @@ @SuppressWarnings("serial") public class DispatcherException extends SpcueRuntimeException { - public DispatcherException(String arg0) { - super(arg0); - // TODO Auto-generated constructor stub - } + public DispatcherException(String arg0) { + super(arg0); + // TODO Auto-generated constructor stub + } - public DispatcherException(String arg0, Throwable arg1) { - super(arg0, arg1); - // TODO Auto-generated constructor stub - } + public DispatcherException(String arg0, Throwable arg1) { + super(arg0, arg1); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java index 88d2f72cc..59abeb0e3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java @@ -63,651 +63,662 @@ */ public class FrameCompleteHandler { - private static final Logger logger = LogManager.getLogger(FrameCompleteHandler.class); - - private static final Random randomNumber = new Random(); - - private HostManager hostManager; - private JobManager jobManager; - private RedirectManager redirectManager; - private BookingManager bookingManager; - private DispatchQueue dispatchQueue; - private BookingQueue bookingQueue; - private Dispatcher dispatcher; - private Dispatcher localDispatcher; - private JobManagerSupport jobManagerSupport; - private DispatchSupport dispatchSupport; - private JmsMover jsmMover; - - private WhiteboardDao whiteboardDao; - private ServiceDao serviceDao; - private ShowDao showDao; - private Environment env; - - /* - * The last time a proc was unbooked for subscription or job balancing. Since there are so many - * more dispatch threads than booking threads, the dispatcher will over compensate and unbook too - * many cores if an imbalance occurs. Its better to keep cores running the same place for cache - * coherence. - */ - private final AtomicLong lastUnbook = new AtomicLong(0); - - /* - * The amount of time to wait before unbooking another proc for subscription or job balancing. - */ - private static final int UNBOOK_EXPIRE_MS = 2500; - - /** - * Boolean to toggle if this class is accepting data or not. 
- */ - private boolean shutdown = false; - - /** - * Whether or not to satisfy dependents (*_ON_FRAME and *_ON_LAYER) only on Frame success - */ - private boolean satisfyDependOnlyOnFrameSuccess; - - public boolean getSatisfyDependOnlyOnFrameSuccess() { - return satisfyDependOnlyOnFrameSuccess; - } - - public void setSatisfyDependOnlyOnFrameSuccess(boolean satisfyDependOnlyOnFrameSuccess) { - this.satisfyDependOnlyOnFrameSuccess = satisfyDependOnlyOnFrameSuccess; - } - - @Autowired - public FrameCompleteHandler(Environment env) { - this.env = env; - satisfyDependOnlyOnFrameSuccess = - env.getProperty("depend.satisfy_only_on_frame_success", Boolean.class, true); - } - - /** - * Handle the given FrameCompleteReport from RQD. - * - * @param report - */ - public void handleFrameCompleteReport(final FrameCompleteReport report) { + private static final Logger logger = LogManager.getLogger(FrameCompleteHandler.class); + + private static final Random randomNumber = new Random(); + + private HostManager hostManager; + private JobManager jobManager; + private RedirectManager redirectManager; + private BookingManager bookingManager; + private DispatchQueue dispatchQueue; + private BookingQueue bookingQueue; + private Dispatcher dispatcher; + private Dispatcher localDispatcher; + private JobManagerSupport jobManagerSupport; + private DispatchSupport dispatchSupport; + private JmsMover jsmMover; + + private WhiteboardDao whiteboardDao; + private ServiceDao serviceDao; + private ShowDao showDao; + private Environment env; /* - * A boolean we're going to set to true if we can detect a corrupted data block in Oracle. + * The last time a proc was unbooked for subscription or job balancing. Since there are so many + * more dispatch threads than booking threads, the dispatcher will over compensate and unbook + * too many cores if an imbalance occurs. Its better to keep cores running the same place for + * cache coherence. */ - if (isShutdown()) { - throw new RqdRetryReportException( - "Error processing the frame complete report, " + "cuebot not accepting packets."); - } - - try { - final VirtualProc proc = hostManager.getVirtualProc(report.getFrame().getResourceId()); - final DispatchJob job = jobManager.getDispatchJob(proc.getJobId()); - final LayerDetail layer = jobManager.getLayerDetail(report.getFrame().getLayerId()); - final FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - final DispatchFrame frame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - final FrameState newFrameState = determineFrameState(job, layer, frame, report); - final String key = proc.getJobId() + "_" + report.getFrame().getLayerId() + "_" - + report.getFrame().getFrameId(); - - // rqd is currently not able to report exit_signal=9 when a frame is killed by - // the OOM logic. The current solution sets exitStatus to - // Dispatcher.EXIT_STATUS_MEMORY_FAILURE before killing the frame, this enables - // auto-retrying frames affected by the logic when they report with a - // frameCompleteReport. This status retouch ensures a frame complete report is - // not able to override what has been set by the previous logic. 
- int exitStatus = report.getExitStatus(); - if (frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE) { - exitStatus = frameDetail.exitStatus; - } - - if (dispatchSupport.stopFrame(frame, newFrameState, exitStatus, - report.getFrame().getMaxRss())) { - if (dispatcher.isTestMode()) { - // Database modifications on a threadpool cannot be captured by the test thread - handlePostFrameCompleteOperations(proc, report, job, frame, newFrameState, frameDetail); - } else { - dispatchQueue.execute(new KeyRunnable(key) { - @Override - public void run() { - try { - handlePostFrameCompleteOperations(proc, report, job, frame, newFrameState, - frameDetail); - } catch (Exception e) { - logger.warn("Exception during handlePostFrameCompleteOperations " - + "in handleFrameCompleteReport" + CueExceptionUtil.getStackTrace(e)); - } - } - }); - } - } else { - /* - * First check if we have a redirect. When a user retries a frame the proc is redirected - * back to the same job without checking any other properties. - */ - if (redirectManager.hasRedirect(proc)) { - dispatchQueue.execute(new KeyRunnable(key) { - @Override - public void run() { - try { - redirectManager.redirect(proc); - } catch (Exception e) { - logger.warn("Exception during redirect in handleFrameCompleteReport" - + CueExceptionUtil.getStackTrace(e)); - } - } - }); - } else { - dispatchQueue.execute(new KeyRunnable(key) { - @Override - public void run() { - try { - dispatchSupport.unbookProc(proc); - } catch (Exception e) { - logger.warn("Exception during unbookProc in handleFrameCompleteReport" - + CueExceptionUtil.getStackTrace(e)); - } - } - }); - } - } - } catch (EmptyResultDataAccessException e) { - /* - * Do not propagate this exception to RQD. This usually means the cue lost connectivity to the - * host and cleared out the record of the proc. If this is propagated back to RQD, RQD will - * keep retrying the operation forever. - */ - logger.info("failed to acquire data needed to " + "process completed frame: " - + report.getFrame().getFrameName() + " in job " + report.getFrame().getJobName() + "," - + e); - } catch (Exception e) { - - /* - * Everything else we kick back to RQD. - */ - logger.info("failed to acquire data needed " + "to process completed frame: " - + report.getFrame().getFrameName() + " in job " + report.getFrame().getJobName() + "," - + e); - - throw new RqdRetryReportException( - "error processing the frame complete " + "report, sending retry message to RQD " + e, e); - } - } - - /** - * Handles frame complete operations other than the actual frame completing. - * - * Updates proc time usage counters. Drops dependencies. Sets jobs to the finished state. - * Optimizes layer memory requirements. Checks for other jobs that might need procs. Unbook proc - * if it needs to be moved. Check show subscription values. - * - * If the proc is not unbooked and moved, its re-dispatched onto the same job. - * - * @param proc - * @param report - * @param job - * @param frame - * @param newFrameState - */ - public void handlePostFrameCompleteOperations(VirtualProc proc, FrameCompleteReport report, - DispatchJob job, DispatchFrame frame, FrameState newFrameState, FrameDetail frameDetail) { - try { - - /* - * The default behavior is to keep the proc on the same job. 
- */ - boolean unbookProc = proc.unbooked; - - dispatchSupport.updateUsageCounters(frame, report.getExitStatus()); - - boolean isLayerComplete = false; - - if (newFrameState.equals(FrameState.SUCCEEDED) - || (!satisfyDependOnlyOnFrameSuccess && newFrameState.equals(FrameState.EATEN))) { - jobManagerSupport.satisfyWhatDependsOn(frame); - isLayerComplete = jobManager.isLayerComplete(frame); - if (isLayerComplete) { - jobManagerSupport.satisfyWhatDependsOn((LayerInterface) frame); - } - } + private final AtomicLong lastUnbook = new AtomicLong(0); - if (newFrameState.equals(FrameState.SUCCEEDED) && !isLayerComplete) { - /* - * If the layer meets some specific criteria then try to update the minimum memory and tags - * so it can run on a wider variety of cores, namely older hardware. - */ - jobManager.optimizeLayer(frame, report.getFrame().getNumCores(), - report.getFrame().getMaxRss(), report.getRunTime()); - } - - /* - * The final frame can either be Succeeded or Eaten. If you only check if the frame is - * Succeeded before doing an isJobComplete check, then jobs that finish with the auto-eat flag - * enabled will not leave the cue. - */ - if (newFrameState.equals(FrameState.SUCCEEDED) || newFrameState.equals(FrameState.EATEN)) { - if (jobManager.isJobComplete(job)) { - job.state = JobState.FINISHED; - jobManagerSupport.queueShutdownJob(job, new Source("natural"), false); - } - } - - /* - * Some exit statuses indicate that a frame was killed by the application due to a memory - * issue and should be retried. In this case, disable the optimizer and raise the memory by - * what is specified in the show's service override, service or 2GB. - */ - if (report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE) { - long increase = CueUtil.GB2; - - // since there can be multiple services, just going for the - // first service (primary) - String serviceName = ""; - try { - serviceName = frame.services.split(",")[0]; - ServiceOverride showService = - whiteboardDao.getServiceOverride(showDao.findShowDetail(frame.show), serviceName); - // increase override is stored in Kb format so convert to Mb - // for easier reading. Note: Kb->Mb conversion uses 1024 blocks - increase = showService.getData().getMinMemoryIncrease(); - logger.info("Using " + serviceName + " service show " + "override for memory increase: " - + Math.floor(increase / 1024) + "Mb."); - } catch (NullPointerException e) { - logger.info("Frame has no associated services"); - } catch (EmptyResultDataAccessException e) { - logger.info(frame.show + " has no service override for " + serviceName + "."); - Service service = whiteboardDao.findService(serviceName); - increase = service.getMinMemoryIncrease(); - logger.info( - "Using service default for mem increase: " + Math.floor(increase / 1024) + "Mb."); - } + /* + * The amount of time to wait before unbooking another proc for subscription or job balancing. + */ + private static final int UNBOOK_EXPIRE_MS = 2500; - unbookProc = true; - jobManager.enableMemoryOptimizer(frame, false); - jobManager.increaseLayerMemoryRequirement(frame, proc.memoryReserved + increase); - logger.info("Increased mem usage to: " + (proc.memoryReserved + increase)); - } + /** + * Boolean to toggle if this class is accepting data or not. 
+ */ + private boolean shutdown = false; - /* - * Check for local dispatching. - */ + /** + * Whether or not to satisfy dependents (*_ON_FRAME and *_ON_LAYER) only on Frame success + */ + private boolean satisfyDependOnlyOnFrameSuccess; - if (proc.isLocalDispatch) { + public boolean getSatisfyDependOnlyOnFrameSuccess() { + return satisfyDependOnlyOnFrameSuccess; + } - if (!bookingManager.hasLocalHostAssignment(proc)) { - logger.info("the proc " + proc + " no longer has a local assignment."); - unbookProc = true; - } - } - - /* - * An exit status of FAILED_LAUNCH (256) indicates that the frame could not be launched due to - * some unforeseen unrecoverable error that is not checked when the launch command is given. - * The most common cause of this is when the job log directory is removed before the job is - * complete. - * - * Frames that return a 256 are put Frame back into WAITING status - */ - - else if (report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE) { - logger.info("unbooking " + proc + " frame status was failed frame launch."); - unbookProc = true; - } - - else if (report.getHost().getNimbyLocked()) { - - if (!proc.isLocalDispatch) { - logger.info("unbooking " + proc + " was NIMBY locked."); - unbookProc = true; - } + public void setSatisfyDependOnlyOnFrameSuccess(boolean satisfyDependOnlyOnFrameSuccess) { + this.satisfyDependOnlyOnFrameSuccess = satisfyDependOnlyOnFrameSuccess; + } + + @Autowired + public FrameCompleteHandler(Environment env) { + this.env = env; + satisfyDependOnlyOnFrameSuccess = + env.getProperty("depend.satisfy_only_on_frame_success", Boolean.class, true); + } + + /** + * Handle the given FrameCompleteReport from RQD. + * + * @param report + */ + public void handleFrameCompleteReport(final FrameCompleteReport report) { - /* Update the NIMBY locked state */ - hostManager.setHostLock(proc, LockState.NIMBY_LOCKED, new Source("NIMBY")); - } else if (report.getHost().getFreeMem() < CueUtil.MB512) { - /* - * Unbook anything on a proc that has only 512MB of free memory left. - */ - logger.info("unbooking" + proc + " was low was memory "); - unbookProc = true; - } else if (dispatchSupport.isShowOverBurst(proc)) { /* - * Unbook the proc if the show is over burst. + * A boolean we're going to set to true if we can detect a corrupted data block in Oracle. 
*/ - logger.info("show using proc " + proc + " is over burst."); - unbookProc = true; - } else if (!hostManager.isHostUp(proc)) { - - logger.info("the proc " + proc + " is not in the update state."); - unbookProc = true; - } else if (hostManager.isLocked(proc)) { - if (!proc.isLocalDispatch) { - logger.info("the proc " + proc + " is not in the open state."); - unbookProc = true; + if (isShutdown()) { + throw new RqdRetryReportException("Error processing the frame complete report, " + + "cuebot not accepting packets."); } - } else if (redirectManager.hasRedirect(proc)) { - logger.info("the proc " + proc + " has been redirected."); + try { + final VirtualProc proc = hostManager.getVirtualProc(report.getFrame().getResourceId()); + final DispatchJob job = jobManager.getDispatchJob(proc.getJobId()); + final LayerDetail layer = jobManager.getLayerDetail(report.getFrame().getLayerId()); + final FrameDetail frameDetail = + jobManager.getFrameDetail(report.getFrame().getFrameId()); + final DispatchFrame frame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + final FrameState newFrameState = determineFrameState(job, layer, frame, report); + final String key = proc.getJobId() + "_" + report.getFrame().getLayerId() + "_" + + report.getFrame().getFrameId(); + + // rqd is currently not able to report exit_signal=9 when a frame is killed by + // the OOM logic. The current solution sets exitStatus to + // Dispatcher.EXIT_STATUS_MEMORY_FAILURE before killing the frame, this enables + // auto-retrying frames affected by the logic when they report with a + // frameCompleteReport. This status retouch ensures a frame complete report is + // not able to override what has been set by the previous logic. + int exitStatus = report.getExitStatus(); + if (frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE) { + exitStatus = frameDetail.exitStatus; + } - if (redirectManager.redirect(proc)) { - return; + if (dispatchSupport.stopFrame(frame, newFrameState, exitStatus, + report.getFrame().getMaxRss())) { + if (dispatcher.isTestMode()) { + // Database modifications on a threadpool cannot be captured by the test thread + handlePostFrameCompleteOperations(proc, report, job, frame, newFrameState, + frameDetail); + } else { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + handlePostFrameCompleteOperations(proc, report, job, frame, + newFrameState, frameDetail); + } catch (Exception e) { + logger.warn("Exception during handlePostFrameCompleteOperations " + + "in handleFrameCompleteReport" + + CueExceptionUtil.getStackTrace(e)); + } + } + }); + } + } else { + /* + * First check if we have a redirect. When a user retries a frame the proc is + * redirected back to the same job without checking any other properties. + */ + if (redirectManager.hasRedirect(proc)) { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + redirectManager.redirect(proc); + } catch (Exception e) { + logger.warn("Exception during redirect in handleFrameCompleteReport" + + CueExceptionUtil.getStackTrace(e)); + } + } + }); + } else { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + dispatchSupport.unbookProc(proc); + } catch (Exception e) { + logger.warn( + "Exception during unbookProc in handleFrameCompleteReport" + + CueExceptionUtil.getStackTrace(e)); + } + } + }); + } + } + } catch (EmptyResultDataAccessException e) { + /* + * Do not propagate this exception to RQD. 
This usually means the cue lost connectivity + * to the host and cleared out the record of the proc. If this is propagated back to + * RQD, RQD will keep retrying the operation forever. + */ + logger.info("failed to acquire data needed to " + "process completed frame: " + + report.getFrame().getFrameName() + " in job " + report.getFrame().getJobName() + + "," + e); + } catch (Exception e) { + + /* + * Everything else we kick back to RQD. + */ + logger.info("failed to acquire data needed " + "to process completed frame: " + + report.getFrame().getFrameName() + " in job " + report.getFrame().getJobName() + + "," + e); + + throw new RqdRetryReportException("error processing the frame complete " + + "report, sending retry message to RQD " + e, e); } - } + } - /* - * If the proc is unbooked at this point, then unbook it and return. - */ - if (unbookProc) { - dispatchSupport.unbookProc(proc); - return; - } + /** + * Handles frame complete operations other than the actual frame completing. + * + * Updates proc time usage counters. Drops dependencies. Sets jobs to the finished state. + * Optimizes layer memory requirements. Checks for other jobs that might need procs. Unbook proc + * if it needs to be moved. Check show subscription values. + * + * If the proc is not unbooked and moved, its re-dispatched onto the same job. + * + * @param proc + * @param report + * @param job + * @param frame + * @param newFrameState + */ + public void handlePostFrameCompleteOperations(VirtualProc proc, FrameCompleteReport report, + DispatchJob job, DispatchFrame frame, FrameState newFrameState, + FrameDetail frameDetail) { + try { - /* - * Check to see if the job the proc is currently assigned is still dispatchable. - */ - if (job.state.equals(JobState.FINISHED) - || !dispatchSupport.isJobDispatchable(job, proc.isLocalDispatch)) { + /* + * The default behavior is to keep the proc on the same job. + */ + boolean unbookProc = proc.unbooked; - logger.info("The " + job + " is no longer dispatchable."); - dispatchSupport.unbookProc(proc); + dispatchSupport.updateUsageCounters(frame, report.getExitStatus()); - /* - * Only rebook whole cores that have not been locally dispatched. Rebooking fractional can - * cause storms of booking requests that don't have a chance of finding a suitable frame to - * run. - */ - if (!proc.isLocalDispatch && proc.coresReserved >= 100 - && dispatchSupport.isCueBookable(job)) { + boolean isLayerComplete = false; - bookingQueue.execute( - new DispatchBookHost(hostManager.getDispatchHost(proc.getHostId()), dispatcher, env)); - } + if (newFrameState.equals(FrameState.SUCCEEDED) || (!satisfyDependOnlyOnFrameSuccess + && newFrameState.equals(FrameState.EATEN))) { + jobManagerSupport.satisfyWhatDependsOn(frame); + isLayerComplete = jobManager.isLayerComplete(frame); + if (isLayerComplete) { + jobManagerSupport.satisfyWhatDependsOn((LayerInterface) frame); + } + } - if (job.state.equals(JobState.FINISHED)) { - jsmMover.send(job); - } - return; - } - - /* - * If the job is marked unbookable and its over its minimum value, we check to see if the proc - * can be moved to a job that hasn't reached its minimum proc yet. - * - * This will handle show balancing in the future. 
- */ - - if (!proc.isLocalDispatch && randomNumber.nextInt(100) <= Dispatcher.UNBOOK_FREQUENCY - && System.currentTimeMillis() > lastUnbook.get()) { - - // First make sure all jobs have their min cores - // Then check for higher priority jobs - // If not, rebook this job - if (job.autoUnbook && proc.coresReserved >= 100) { - if (jobManager.isOverMinCores(job)) { - try { + if (newFrameState.equals(FrameState.SUCCEEDED) && !isLayerComplete) { + /* + * If the layer meets some specific criteria then try to update the minimum memory + * and tags so it can run on a wider variety of cores, namely older hardware. + */ + jobManager.optimizeLayer(frame, report.getFrame().getNumCores(), + report.getFrame().getMaxRss(), report.getRunTime()); + } + + /* + * The final frame can either be Succeeded or Eaten. If you only check if the frame is + * Succeeded before doing an isJobComplete check, then jobs that finish with the + * auto-eat flag enabled will not leave the cue. + */ + if (newFrameState.equals(FrameState.SUCCEEDED) + || newFrameState.equals(FrameState.EATEN)) { + if (jobManager.isJobComplete(job)) { + job.state = JobState.FINISHED; + jobManagerSupport.queueShutdownJob(job, new Source("natural"), false); + } + } + + /* + * Some exit statuses indicate that a frame was killed by the application due to a + * memory issue and should be retried. In this case, disable the optimizer and raise the + * memory by what is specified in the show's service override, service or 2GB. + */ + if (report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE) { + long increase = CueUtil.GB2; + + // since there can be multiple services, just going for the + // first service (primary) + String serviceName = ""; + try { + serviceName = frame.services.split(",")[0]; + ServiceOverride showService = whiteboardDao + .getServiceOverride(showDao.findShowDetail(frame.show), serviceName); + // increase override is stored in Kb format so convert to Mb + // for easier reading. Note: Kb->Mb conversion uses 1024 blocks + increase = showService.getData().getMinMemoryIncrease(); + logger.info("Using " + serviceName + " service show " + + "override for memory increase: " + Math.floor(increase / 1024) + + "Mb."); + } catch (NullPointerException e) { + logger.info("Frame has no associated services"); + } catch (EmptyResultDataAccessException e) { + logger.info(frame.show + " has no service override for " + serviceName + "."); + Service service = whiteboardDao.findService(serviceName); + increase = service.getMinMemoryIncrease(); + logger.info("Using service default for mem increase: " + + Math.floor(increase / 1024) + "Mb."); + } + + unbookProc = true; + jobManager.enableMemoryOptimizer(frame, false); + jobManager.increaseLayerMemoryRequirement(frame, proc.memoryReserved + increase); + logger.info("Increased mem usage to: " + (proc.memoryReserved + increase)); + } + + /* + * Check for local dispatching. 
+ */ - boolean unbook = dispatchSupport.findUnderProcedJob(job, proc); + if (proc.isLocalDispatch) { - if (!unbook) { - JobDetail jobDetail = jobManager.getJobDetail(job.id); - unbook = dispatchSupport.higherPriorityJobExists(jobDetail, proc); - } + if (!bookingManager.hasLocalHostAssignment(proc)) { + logger.info("the proc " + proc + " no longer has a local assignment."); + unbookProc = true; + } + } - if (unbook) { + /* + * An exit status of FAILED_LAUNCH (256) indicates that the frame could not be launched + * due to some unforeseen unrecoverable error that is not checked when the launch + * command is given. The most common cause of this is when the job log directory is + * removed before the job is complete. + * + * Frames that return a 256 are put Frame back into WAITING status + */ + + else if (report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE) { + logger.info("unbooking " + proc + " frame status was failed frame launch."); + unbookProc = true; + } - // Set a new time to allow unbooking. - lastUnbook.set(System.currentTimeMillis() + UNBOOK_EXPIRE_MS); + else if (report.getHost().getNimbyLocked()) { + + if (!proc.isLocalDispatch) { + logger.info("unbooking " + proc + " was NIMBY locked."); + unbookProc = true; + } + + /* Update the NIMBY locked state */ + hostManager.setHostLock(proc, LockState.NIMBY_LOCKED, new Source("NIMBY")); + } else if (report.getHost().getFreeMem() < CueUtil.MB512) { + /* + * Unbook anything on a proc that has only 512MB of free memory left. + */ + logger.info("unbooking" + proc + " was low was memory "); + unbookProc = true; + } else if (dispatchSupport.isShowOverBurst(proc)) { + /* + * Unbook the proc if the show is over burst. + */ + logger.info("show using proc " + proc + " is over burst."); + unbookProc = true; + } else if (!hostManager.isHostUp(proc)) { + + logger.info("the proc " + proc + " is not in the update state."); + unbookProc = true; + } else if (hostManager.isLocked(proc)) { + if (!proc.isLocalDispatch) { + logger.info("the proc " + proc + " is not in the open state."); + unbookProc = true; + } + } else if (redirectManager.hasRedirect(proc)) { + + logger.info("the proc " + proc + " has been redirected."); + + if (redirectManager.redirect(proc)) { + return; + } + } - logger.info("Transfering " + proc); + /* + * If the proc is unbooked at this point, then unbook it and return. + */ + if (unbookProc) { dispatchSupport.unbookProc(proc); + return; + } - DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + /* + * Check to see if the job the proc is currently assigned is still dispatchable. + */ + if (job.state.equals(JobState.FINISHED) + || !dispatchSupport.isJobDispatchable(job, proc.isLocalDispatch)) { - bookingQueue.execute(new DispatchBookHost(host, dispatcher, env)); + logger.info("The " + job + " is no longer dispatchable."); + dispatchSupport.unbookProc(proc); + + /* + * Only rebook whole cores that have not been locally dispatched. Rebooking + * fractional can cause storms of booking requests that don't have a chance of + * finding a suitable frame to run. 
+ */ + if (!proc.isLocalDispatch && proc.coresReserved >= 100 + && dispatchSupport.isCueBookable(job)) { + + bookingQueue.execute(new DispatchBookHost( + hostManager.getDispatchHost(proc.getHostId()), dispatcher, env)); + } + + if (job.state.equals(JobState.FINISHED)) { + jsmMover.send(job); + } return; - } - } catch (JobLookupException e) { - // wasn't able to find new job } - } + + /* + * If the job is marked unbookable and its over its minimum value, we check to see if + * the proc can be moved to a job that hasn't reached its minimum proc yet. + * + * This will handle show balancing in the future. + */ + + if (!proc.isLocalDispatch && randomNumber.nextInt(100) <= Dispatcher.UNBOOK_FREQUENCY + && System.currentTimeMillis() > lastUnbook.get()) { + + // First make sure all jobs have their min cores + // Then check for higher priority jobs + // If not, rebook this job + if (job.autoUnbook && proc.coresReserved >= 100) { + if (jobManager.isOverMinCores(job)) { + try { + + boolean unbook = dispatchSupport.findUnderProcedJob(job, proc); + + if (!unbook) { + JobDetail jobDetail = jobManager.getJobDetail(job.id); + unbook = dispatchSupport.higherPriorityJobExists(jobDetail, proc); + } + + if (unbook) { + + // Set a new time to allow unbooking. + lastUnbook.set(System.currentTimeMillis() + UNBOOK_EXPIRE_MS); + + logger.info("Transfering " + proc); + dispatchSupport.unbookProc(proc); + + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + + bookingQueue.execute(new DispatchBookHost(host, dispatcher, env)); + return; + } + } catch (JobLookupException e) { + // wasn't able to find new job + } + } + } + } + + if (newFrameState.equals(FrameState.WAITING) + || newFrameState.equals(FrameState.SUCCEEDED)) { + + /* + * Check for stranded cores on the host. + */ + if (!proc.isLocalDispatch && dispatchSupport.hasStrandedCores(proc) + && jobManager.isLayerThreadable(frame) + && dispatchSupport.isJobBookable(job)) { + + int stranded_cores = hostManager.getStrandedCoreUnits(proc); + if (stranded_cores >= 100) { + + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + dispatchSupport.strandCores(host, stranded_cores); + dispatchSupport.unbookProc(proc); + bookingQueue.execute(new DispatchBookHost(host, job, dispatcher, env)); + return; + } + } + + // Book the next frame of this job on the same proc + if (proc.isLocalDispatch) { + dispatchQueue.execute(new DispatchNextFrame(job, proc, localDispatcher)); + } else { + dispatchQueue.execute(new DispatchNextFrame(job, proc, dispatcher)); + } + } else { + dispatchSupport.unbookProc(proc, "frame state was " + newFrameState.toString()); + } + } catch (Exception e) { + /* + * At this point, the proc has no place to go. Since we've run into an error its best to + * just unbook it. You can't handle this with a roll back because the record existed + * before any transactions started. 
+             */
+            logger.warn("An error occurred when processing " + "frame complete message, "
+                    + CueExceptionUtil.getStackTrace(e));
+            try {
+                dispatchSupport.unbookProc(proc,
+                        "an error occurred when processing frame complete message.");
+            } catch (EmptyResultDataAccessException ee) {
+                logger.info("Failed to find proc to unbook after frame " + "complete message "
+                        + CueExceptionUtil.getStackTrace(ee));
+            }
+        }
+    }
+
+    /**
+     * Determines the new FrameState for a frame based on values contained in the
+     * FrameCompleteReport.
+     *
+     * If the frame is Waiting or Eaten, then it was manually set to that status before the frame
+     * was killed. In that case, whatever state is currently in the DB is the one we want to use.
+     *
+     * If the frame status is dead or the frame.exitStatus is a non-zero value, and the frame has
+     * been retried job.maxRetries times, then the frame is Dead. If the frame has an exit status of
+     * 256, which is a non-retry status, the frame is dead.
+     *
+     * Assuming the two previous checks are not true, then a non-zero exit status sets the frame
+     * back to Waiting, while a zero status sets the frame to Succeeded.
+     *
+     * @param job
+     * @param frame
+     * @param report
+     * @return
+     */
+    public static final FrameState determineFrameState(DispatchJob job, LayerDetail layer,
+            DispatchFrame frame, FrameCompleteReport report) {
+
+        if (EnumSet.of(FrameState.WAITING, FrameState.EATEN).contains(frame.state)) {
+            return frame.state;
+        }
+        // Checks for frames that have reached max retries.
+ else if (frame.state.equals(FrameState.DEAD)) { + if (job.autoEat) { + return FrameState.EATEN; + } else { + return FrameState.DEPEND; + } + } else if (report.getExitStatus() != 0) { + + long r = System.currentTimeMillis() / 1000; + long lastUpdate = (r - report.getFrame().getLluTime()) / 60; + + FrameState newState = FrameState.WAITING; + if (report.getExitStatus() == FrameExitStatus.SKIP_RETRY_VALUE + || (job.maxRetries != 0 && report.getExitSignal() == 119)) { + report = FrameCompleteReport.newBuilder(report) + .setExitStatus(FrameExitStatus.SKIP_RETRY_VALUE).build(); + newState = FrameState.WAITING; + // exemption code 256 + } else if ((report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE + || report.getExitSignal() == FrameExitStatus.FAILED_LAUNCH_VALUE) + && (frame.retries < job.maxRetries)) { + report = FrameCompleteReport.newBuilder(report) + .setExitStatus(report.getExitStatus()).build(); + newState = FrameState.WAITING; + } else if (job.autoEat) { + newState = FrameState.EATEN; + // ETC Time out and LLU timeout + } else if (layer.timeout_llu != 0 && report.getFrame().getLluTime() != 0 + && lastUpdate > (layer.timeout_llu - 1)) { + newState = FrameState.DEAD; + } else if (layer.timeout != 0 && report.getRunTime() > layer.timeout * 60) { + newState = FrameState.DEAD; + } else if (report.getRunTime() > Dispatcher.FRAME_TIME_NO_RETRY) { + newState = FrameState.DEAD; + } else if (frame.retries >= job.maxRetries) { + if (!(report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE)) + newState = FrameState.DEAD; + } - // Book the next frame of this job on the same proc - if (proc.isLocalDispatch) { - dispatchQueue.execute(new DispatchNextFrame(job, proc, localDispatcher)); + return newState; } else { - dispatchQueue.execute(new DispatchNextFrame(job, proc, dispatcher)); + return FrameState.SUCCEEDED; } - } else { - dispatchSupport.unbookProc(proc, "frame state was " + newFrameState.toString()); - } - } catch (Exception e) { - /* - * At this point, the proc has no place to go. Since we've run into an error its best to just - * unbook it. You can't handle this with a roll back because the record existed before any - * transactions started. - */ - logger.warn("An error occured when procssing " + "frame complete message, " - + CueExceptionUtil.getStackTrace(e)); - try { - dispatchSupport.unbookProc(proc, "an error occured when procssing frame complete message."); - } catch (EmptyResultDataAccessException ee) { - logger.info("Failed to find proc to unbook after frame " + "complete message " - + CueExceptionUtil.getStackTrace(ee)); - } - } - } - - /** - * Determines the new FrameState for a frame based on values contained in the FrameCompleteReport - * - * If the frame is Waiting or Eaten, then it was manually set to that status before the frame was - * killed. In that case whatever the current state in the DB is the one we want to use. - * - * If the frame status is dead or the frame.exitStatus is a non-zero value, and the frame has been - * retried job.maxRetries times, then the frame is Dead. If the frame has an exit status of 256, - * that is a non-retry status, the frame is dead. - * - * Assuming the two previous checks are not true, then a non-zero exit status sets the frame back - * to Waiting, while a zero status sets the frame to Succeeded. 
- * - * @param job - * @param frame - * @param report - * @return - */ - public static final FrameState determineFrameState(DispatchJob job, LayerDetail layer, - DispatchFrame frame, FrameCompleteReport report) { - - if (EnumSet.of(FrameState.WAITING, FrameState.EATEN).contains(frame.state)) { - return frame.state; - } - // Checks for frames that have reached max retries. - else if (frame.state.equals(FrameState.DEAD)) { - if (job.autoEat) { - return FrameState.EATEN; - } else { - return FrameState.DEPEND; - } - } else if (report.getExitStatus() != 0) { - - long r = System.currentTimeMillis() / 1000; - long lastUpdate = (r - report.getFrame().getLluTime()) / 60; - - FrameState newState = FrameState.WAITING; - if (report.getExitStatus() == FrameExitStatus.SKIP_RETRY_VALUE - || (job.maxRetries != 0 && report.getExitSignal() == 119)) { - report = FrameCompleteReport.newBuilder(report) - .setExitStatus(FrameExitStatus.SKIP_RETRY_VALUE).build(); - newState = FrameState.WAITING; - // exemption code 256 - } else if ((report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE - || report.getExitSignal() == FrameExitStatus.FAILED_LAUNCH_VALUE) - && (frame.retries < job.maxRetries)) { - report = - FrameCompleteReport.newBuilder(report).setExitStatus(report.getExitStatus()).build(); - newState = FrameState.WAITING; - } else if (job.autoEat) { - newState = FrameState.EATEN; - // ETC Time out and LLU timeout - } else if (layer.timeout_llu != 0 && report.getFrame().getLluTime() != 0 - && lastUpdate > (layer.timeout_llu - 1)) { - newState = FrameState.DEAD; - } else if (layer.timeout != 0 && report.getRunTime() > layer.timeout * 60) { - newState = FrameState.DEAD; - } else if (report.getRunTime() > Dispatcher.FRAME_TIME_NO_RETRY) { - newState = FrameState.DEAD; - } else if (frame.retries >= job.maxRetries) { - if (!(report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitStatus() == Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE)) - newState = FrameState.DEAD; - } - - return newState; - } else { - return FrameState.SUCCEEDED; - } - } - - public boolean isShutdown() { - return shutdown; - } - - public synchronized void shutdown() { - logger.info("Shutting down FrameCompleteHandler."); - shutdown = true; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public DispatchQueue getDispatchQueue() { - return dispatchQueue; - } - - public void setDispatchQueue(DispatchQueue dispatchQueue) { - this.dispatchQueue = dispatchQueue; - } - - public BookingQueue getBookingQueue() { - return bookingQueue; - } - - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } - - public Dispatcher getDispatcher() { - return dispatcher; - } - - public void setDispatcher(Dispatcher dispatcher) { - this.dispatcher = dispatcher; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = 
jobManagerSupport; - } - - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } - - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } - - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } - - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } - - public BookingManager getBookingManager() { - return bookingManager; - } - - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } - - public JmsMover getJmsMover() { - return jsmMover; - } + } + + public boolean isShutdown() { + return shutdown; + } + + public synchronized void shutdown() { + logger.info("Shutting down FrameCompleteHandler."); + shutdown = true; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public DispatchQueue getDispatchQueue() { + return dispatchQueue; + } + + public void setDispatchQueue(DispatchQueue dispatchQueue) { + this.dispatchQueue = dispatchQueue; + } + + public BookingQueue getBookingQueue() { + return bookingQueue; + } + + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public Dispatcher getDispatcher() { + return dispatcher; + } + + public void setDispatcher(Dispatcher dispatcher) { + this.dispatcher = dispatcher; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } + + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } - public void setJmsMover(JmsMover jsmMover) { - this.jsmMover = jsmMover; - } + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } - public WhiteboardDao getWhiteboardDao() { - return whiteboardDao; - } - - public void setWhiteboardDao(WhiteboardDao whiteboardDao) { - this.whiteboardDao = whiteboardDao; - } - - public ServiceDao getServiceDao() { - return serviceDao; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public void setServiceDao(ServiceDao serviceDao) { - this.serviceDao = serviceDao; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - public ShowDao getShowDao() { - return showDao; - } + public JmsMover getJmsMover() { + return jsmMover; + } - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } + public void setJmsMover(JmsMover jsmMover) { + this.jsmMover = jsmMover; + } + + public WhiteboardDao getWhiteboardDao() { + return whiteboardDao; + } + + public void setWhiteboardDao(WhiteboardDao whiteboardDao) { + this.whiteboardDao = whiteboardDao; + } + + public ServiceDao getServiceDao() { + return serviceDao; + } + 
+ public void setServiceDao(ServiceDao serviceDao) {
+ this.serviceDao = serviceDao;
+ }
+
+ public ShowDao getShowDao() {
+ return showDao;
+ }
+
+ public void setShowDao(ShowDao showDao) {
+ this.showDao = showDao;
+ }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java
index 038530499..878ab0fa2 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameLookupException.java
@@ -20,23 +20,23 @@
 @SuppressWarnings("serial")
 public class FrameLookupException extends SpcueRuntimeException {
- public FrameLookupException() {
- // TODO Auto-generated constructor stub
- }
-
- public FrameLookupException(String message, Throwable cause) {
- super(message, cause);
- // TODO Auto-generated constructor stub
- }
-
- public FrameLookupException(String message) {
- super(message);
- // TODO Auto-generated constructor stub
- }
-
- public FrameLookupException(Throwable cause) {
- super(cause);
- // TODO Auto-generated constructor stub
- }
+ public FrameLookupException() {
+ // TODO Auto-generated constructor stub
+ }
+
+ public FrameLookupException(String message, Throwable cause) {
+ super(message, cause);
+ // TODO Auto-generated constructor stub
+ }
+
+ public FrameLookupException(String message) {
+ super(message);
+ // TODO Auto-generated constructor stub
+ }
+
+ public FrameLookupException(Throwable cause) {
+ super(cause);
+ // TODO Auto-generated constructor stub
+ }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java
index 989c4c435..dba980111 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameReservationException.java
@@ -20,23 +20,23 @@
 @SuppressWarnings("serial")
 public class FrameReservationException extends SpcueRuntimeException {
- public FrameReservationException() {
- // TODO Auto-generated constructor stub
- }
-
- public FrameReservationException(String message, Throwable cause) {
- super(message, cause);
- // TODO Auto-generated constructor stub
- }
-
- public FrameReservationException(String message) {
- super(message);
- // TODO Auto-generated constructor stub
- }
-
- public FrameReservationException(Throwable cause) {
- super(cause);
- // TODO Auto-generated constructor stub
- }
+ public FrameReservationException() {
+ // TODO Auto-generated constructor stub
+ }
+
+ public FrameReservationException(String message, Throwable cause) {
+ super(message, cause);
+ // TODO Auto-generated constructor stub
+ }
+
+ public FrameReservationException(String message) {
+ super(message);
+ // TODO Auto-generated constructor stub
+ }
+
+ public FrameReservationException(Throwable cause) {
+ super(cause);
+ // TODO Auto-generated constructor stub
+ }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java
index 0a6301044..7ea426d42 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java
@@ -23,195 +23,199 @@
 *
 */
 public class HealthyThreadPool extends ThreadPoolExecutor {
- // The service need s to be unhealthy for this period of time to report
- private static final Logger logger = LogManager.getLogger("HEALTH");
- // Threshold to consider healthy or unhealthy
- private final int healthThreshold;
- private final int poolSize;
- private final int minUnhealthyPeriodMin;
- private final QueueRejectCounter rejectCounter = new QueueRejectCounter();
- private final Cache taskCache;
- private final String name;
- private Date lastCheck = new Date();
- private boolean wasHealthy = true;
- protected final AtomicBoolean isShutdown = new AtomicBoolean(false);
- private final int baseSleepTimeMillis;
-
- /**
- * Start a thread pool
- *
- * @param name For logging purposes
- * @param healthThreshold Percentage that should be available to consider healthy
- * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy
- * @param poolSize how many jobs can be queued
- * @param threadsMinimum Minimum number of threads
- * @param threadsMaximum Maximum number of threads to grow to
- */
- public HealthyThreadPool(String name, int healthThreshold, int minUnhealthyPeriodMin,
- int poolSize, int threadsMinimum, int threadsMaximum) {
- this(name, healthThreshold, minUnhealthyPeriodMin, poolSize, threadsMinimum, threadsMaximum, 0);
- }
-
- /**
- * Start a thread pool
- *
- * @param name For logging purposes
- * @param healthThreshold Percentage that should be available to consider healthy
- * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy
- * @param poolSize how many jobs can be queued
- * @param threadsMinimum Minimum number of threads
- * @param threadsMaximum Maximum number of threads to grow to
- * @param baseSleepTimeMillis Time a thread should sleep when the service is not under pressure
- */
- public HealthyThreadPool(String name, int healthThreshold, int minUnhealthyPeriodMin,
- int poolSize, int threadsMinimum, int threadsMaximum, int baseSleepTimeMillis) {
- super(threadsMinimum, threadsMaximum, 10, TimeUnit.SECONDS,
- new LinkedBlockingQueue(poolSize));
-
- logger.debug(name + ": Starting a new HealthyThreadPool");
- this.name = name;
- this.healthThreshold = healthThreshold;
- this.poolSize = poolSize;
- this.minUnhealthyPeriodMin = minUnhealthyPeriodMin;
- this.baseSleepTimeMillis = baseSleepTimeMillis;
- this.setRejectedExecutionHandler(rejectCounter);
-
- this.taskCache = CacheBuilder.newBuilder().expireAfterWrite(3, TimeUnit.MINUTES)
- // Invalidate entries that got executed by the threadPool and lost their
- // reference
- .weakValues().concurrencyLevel(threadsMaximum).build();
- }
-
- public void execute(KeyRunnable r) {
- if (isShutdown.get()) {
- logger.info(name + ": Task ignored, queue on hold or shutdown");
- return;
+ // The service needs to be unhealthy for this period of time to report
+ private static final Logger logger = LogManager.getLogger("HEALTH");
+ // Threshold to consider healthy or unhealthy
+ private final int healthThreshold;
+ private final int poolSize;
+ private final int minUnhealthyPeriodMin;
+ private final QueueRejectCounter rejectCounter = new QueueRejectCounter();
+ private final Cache taskCache;
+ private final String name;
+ private Date lastCheck = new Date();
+ private boolean wasHealthy = true;
+ protected final AtomicBoolean isShutdown = new AtomicBoolean(false);
+ private final int baseSleepTimeMillis;
+
+ /**
+ * Start a thread pool
+ *
+ * @param name For logging purposes
+ * @param healthThreshold Percentage that should be available to consider healthy
+ * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy
+
* @param poolSize how many jobs can be queued + * @param threadsMinimum Minimum number of threads + * @param threadsMaximum Maximum number of threads to grow to + */ + public HealthyThreadPool(String name, int healthThreshold, int minUnhealthyPeriodMin, + int poolSize, int threadsMinimum, int threadsMaximum) { + this(name, healthThreshold, minUnhealthyPeriodMin, poolSize, threadsMinimum, threadsMaximum, + 0); } - if (taskCache.getIfPresent(r.getKey()) == null) { - taskCache.put(r.getKey(), r); - super.execute(r); + + /** + * Start a thread pool + * + * @param name For logging purposes + * @param healthThreshold Percentage that should be available to consider healthy + * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy + * @param poolSize how many jobs can be queued + * @param threadsMinimum Minimum number of threads + * @param threadsMaximum Maximum number of threads to grow to + * @param baseSleepTimeMillis Time a thread should sleep when the service is not under pressure + */ + public HealthyThreadPool(String name, int healthThreshold, int minUnhealthyPeriodMin, + int poolSize, int threadsMinimum, int threadsMaximum, int baseSleepTimeMillis) { + super(threadsMinimum, threadsMaximum, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue(poolSize)); + + logger.debug(name + ": Starting a new HealthyThreadPool"); + this.name = name; + this.healthThreshold = healthThreshold; + this.poolSize = poolSize; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.baseSleepTimeMillis = baseSleepTimeMillis; + this.setRejectedExecutionHandler(rejectCounter); + + this.taskCache = CacheBuilder.newBuilder().expireAfterWrite(3, TimeUnit.MINUTES) + // Invalidate entries that got executed by the threadPool and lost their + // reference + .weakValues().concurrencyLevel(threadsMaximum).build(); } - } - - public long getRejectedTaskCount() { - return rejectCounter.getRejectCount(); - } - - /** - * Monitor if the queue is unhealthy for MIN_UNHEALTHY_PERIOD_MIN - * - * If unhealthy, the service will start the shutdown process and the caller is responsible for - * starting a new instance after the lock on awaitTermination is released. 
- */ - protected boolean shutdownUnhealthy() throws InterruptedException { - Date now = new Date(); - if (diffInMinutes(lastCheck, now) > minUnhealthyPeriodMin) { - this.wasHealthy = healthCheck(); - this.lastCheck = now; + + public void execute(KeyRunnable r) { + if (isShutdown.get()) { + logger.info(name + ": Task ignored, queue on hold or shutdown"); + return; + } + if (taskCache.getIfPresent(r.getKey()) == null) { + taskCache.put(r.getKey(), r); + super.execute(r); + } } - if (healthCheck() || wasHealthy) { - logger.debug(name + ": healthy (" + "Remaining Capacity: " - + this.getQueue().remainingCapacity() + ", Running: " + this.getActiveCount() - + ", Total Executed: " + this.getCompletedTaskCount() + ")"); - return true; - } else if (isShutdown.get()) { - logger.warn("Queue shutting down"); - return false; - } else { - logger.warn(name + ": unhealthy, starting shutdown)"); - threadDump(); - - isShutdown.set(true); - super.shutdownNow(); - logger.warn(name + ": Awaiting unhealthy queue termination"); - if (super.awaitTermination(1, TimeUnit.MINUTES)) { - logger.info(name + ": Terminated successfully"); - } else { - logger.warn(name + ": Failed to terminate"); - } - // Threads will eventually terminate, proceed - taskCache.invalidateAll(); - return false; + public long getRejectedTaskCount() { + return rejectCounter.getRejectCount(); } - } - private void threadDump() { - ThreadMXBean mx = ManagementFactory.getThreadMXBean(); - for (ThreadInfo info : mx.dumpAllThreads(true, true)) { - logger.debug(info.toString()); + /** + * Monitor if the queue is unhealthy for MIN_UNHEALTHY_PERIOD_MIN + * + * If unhealthy, the service will start the shutdown process and the caller is responsible for + * starting a new instance after the lock on awaitTermination is released. + */ + protected boolean shutdownUnhealthy() throws InterruptedException { + Date now = new Date(); + if (diffInMinutes(lastCheck, now) > minUnhealthyPeriodMin) { + this.wasHealthy = healthCheck(); + this.lastCheck = now; + } + + if (healthCheck() || wasHealthy) { + logger.debug(name + ": healthy (" + "Remaining Capacity: " + + this.getQueue().remainingCapacity() + ", Running: " + this.getActiveCount() + + ", Total Executed: " + this.getCompletedTaskCount() + ")"); + return true; + } else if (isShutdown.get()) { + logger.warn("Queue shutting down"); + return false; + } else { + logger.warn(name + ": unhealthy, starting shutdown)"); + threadDump(); + + isShutdown.set(true); + super.shutdownNow(); + logger.warn(name + ": Awaiting unhealthy queue termination"); + if (super.awaitTermination(1, TimeUnit.MINUTES)) { + logger.info(name + ": Terminated successfully"); + } else { + logger.warn(name + ": Failed to terminate"); + } + // Threads will eventually terminate, proceed + taskCache.invalidateAll(); + return false; + } } - } - - private static long diffInMinutes(Date dateStart, Date dateEnd) { - return TimeUnit.MINUTES.convert(dateEnd.getTime() - dateStart.getTime(), TimeUnit.MILLISECONDS); - } - - /** - * Lowers the sleep time as the queue grows. 
- * - * @return - */ - public int sleepTime() { - if (!isShutdown.get()) { - int sleep = (int) (baseSleepTimeMillis - - (((this.getQueue().size() / (float) this.poolSize) * baseSleepTimeMillis)) * 2); - if (sleep < 0) { - sleep = 0; - } - return sleep; - } else { - return 0; + + private void threadDump() { + ThreadMXBean mx = ManagementFactory.getThreadMXBean(); + for (ThreadInfo info : mx.dumpAllThreads(true, true)) { + logger.debug(info.toString()); + } + } + + private static long diffInMinutes(Date dateStart, Date dateEnd) { + return TimeUnit.MINUTES.convert(dateEnd.getTime() - dateStart.getTime(), + TimeUnit.MILLISECONDS); } - } - - @Override - protected void beforeExecute(Thread t, Runnable r) { - super.beforeExecute(t, r); - if (isShutdown()) { - this.remove(r); - } else { - if (baseSleepTimeMillis > 0) { - try { - Thread.sleep(sleepTime()); - } catch (InterruptedException e) { - logger.info(name + ": booking queue was interrupted."); + + /** + * Lowers the sleep time as the queue grows. + * + * @return + */ + public int sleepTime() { + if (!isShutdown.get()) { + int sleep = (int) (baseSleepTimeMillis + - (((this.getQueue().size() / (float) this.poolSize) * baseSleepTimeMillis)) + * 2); + if (sleep < 0) { + sleep = 0; + } + return sleep; + } else { + return 0; } - } } - } - - @Override - protected void afterExecute(Runnable r, Throwable t) { - super.afterExecute(r, t); - - // Invalidate cache to avoid having to wait for GC to mark processed entries - // collectible - KeyRunnable h = (KeyRunnable) r; - taskCache.invalidate(h.getKey()); - } - - protected boolean healthCheck() { - return (this.getQueue().remainingCapacity() > 0) - || (getRejectedTaskCount() < this.poolSize / healthThreshold); - } - - public void shutdown() { - if (!isShutdown.getAndSet(true)) { - logger.info("Shutting down thread pool " + name + ", currently " + getActiveCount() - + " active threads."); - final long startTime = System.currentTimeMillis(); - while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { - try { - if (System.currentTimeMillis() - startTime > 10000) { - throw new InterruptedException(name + " thread pool failed to shutdown properly"); - } - Thread.sleep(250); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; + + @Override + protected void beforeExecute(Thread t, Runnable r) { + super.beforeExecute(t, r); + if (isShutdown()) { + this.remove(r); + } else { + if (baseSleepTimeMillis > 0) { + try { + Thread.sleep(sleepTime()); + } catch (InterruptedException e) { + logger.info(name + ": booking queue was interrupted."); + } + } + } + } + + @Override + protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); + + // Invalidate cache to avoid having to wait for GC to mark processed entries + // collectible + KeyRunnable h = (KeyRunnable) r; + taskCache.invalidate(h.getKey()); + } + + protected boolean healthCheck() { + return (this.getQueue().remainingCapacity() > 0) + || (getRejectedTaskCount() < this.poolSize / healthThreshold); + } + + public void shutdown() { + if (!isShutdown.getAndSet(true)) { + logger.info("Shutting down thread pool " + name + ", currently " + getActiveCount() + + " active threads."); + final long startTime = System.currentTimeMillis(); + while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { + try { + if (System.currentTimeMillis() - startTime > 10000) { + throw new InterruptedException( + name + " thread pool failed to shutdown properly"); + } + Thread.sleep(250); + } catch 
(InterruptedException e) { + Thread.currentThread().interrupt(); + break; + } + } } - } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java index 4f0a55bc0..86524fdf3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java @@ -70,998 +70,1018 @@ public class HostReportHandler { - private static final Logger logger = LogManager.getLogger(HostReportHandler.class); - - private BookingManager bookingManager; - private HostManager hostManager; - private BookingQueue bookingQueue; - private ThreadPoolExecutor reportQueue; - private ThreadPoolExecutor killQueue; - private DispatchSupport dispatchSupport; - private Dispatcher dispatcher; - private Dispatcher localDispatcher; - private RqdClient rqdClient; - private JobManager jobManager; - private JobDao jobDao; - private LayerDao layerDao; - - @Autowired - private Environment env; - - @Autowired - private CommentManager commentManager; - - @Autowired - private PrometheusMetricsCollector prometheusMetrics; - - // Comment constants - private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = - "Host set to REPAIR for not having enough storage " - + "space on the temporary directory (mcp)"; - private static final String CUEBOT_COMMENT_USER = "cuebot"; - private static final String WINDOWS_OS = "Windows"; - - // A cache to store kill requests and count the number - // of occurrences. - // The cache expires after write to avoid growing unbounded. If a request for a - // host-frame doesn't appear - // for a period of time, the entry will be removed. - Cache killRequestCounterCache = CacheBuilder.newBuilder() - .expireAfterWrite(FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES, TimeUnit.MINUTES).build(); - - /** - * Boolean to toggle if this class is accepting data or not. - */ - public boolean shutdown = false; - - /** - * Return true if this handler is not accepting packets anymore. - * - * @return - */ - public boolean isShutdown() { - return shutdown; - } - - /** - * Shutdown this handler so it no longer accepts packets. Any call to queue a host report will - * throw an exception. - */ - public synchronized void shutdown() { - logger.info("Shutting down HostReportHandler."); - shutdown = true; - } - - /** - * Queues up the given boot report. - * - * @param report - */ - public void queueBootReport(BootReport report) { - if (isShutdown()) { - throw new RqdRetryReportException( - "Error processing host report. 
Cuebot not " + "accepting packets."); + private static final Logger logger = LogManager.getLogger(HostReportHandler.class); + + private BookingManager bookingManager; + private HostManager hostManager; + private BookingQueue bookingQueue; + private ThreadPoolExecutor reportQueue; + private ThreadPoolExecutor killQueue; + private DispatchSupport dispatchSupport; + private Dispatcher dispatcher; + private Dispatcher localDispatcher; + private RqdClient rqdClient; + private JobManager jobManager; + private JobDao jobDao; + private LayerDao layerDao; + + @Autowired + private Environment env; + + @Autowired + private CommentManager commentManager; + + @Autowired + private PrometheusMetricsCollector prometheusMetrics; + + // Comment constants + private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = + "Host set to REPAIR for not having enough storage " + + "space on the temporary directory (mcp)"; + private static final String CUEBOT_COMMENT_USER = "cuebot"; + private static final String WINDOWS_OS = "Windows"; + + // A cache to store kill requests and count the number + // of occurrences. + // The cache expires after write to avoid growing unbounded. If a request for a + // host-frame doesn't appear + // for a period of time, the entry will be removed. + Cache killRequestCounterCache = CacheBuilder.newBuilder() + .expireAfterWrite(FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES, TimeUnit.MINUTES) + .build(); + + /** + * Boolean to toggle if this class is accepting data or not. + */ + public boolean shutdown = false; + + /** + * Return true if this handler is not accepting packets anymore. + * + * @return + */ + public boolean isShutdown() { + return shutdown; } - reportQueue.execute(new DispatchHandleHostReport(report, this)); - } - - /** - * Queues up the given host report. - * - * @param report - */ - public void queueHostReport(HostReport report) { - if (isShutdown()) { - throw new RqdRetryReportException( - "Error processing host report. Cuebot not " + "accepting packets."); + + /** + * Shutdown this handler so it no longer accepts packets. Any call to queue a host report will + * throw an exception. 
+ */ + public synchronized void shutdown() { + logger.info("Shutting down HostReportHandler."); + shutdown = true; } - reportQueue.execute(new DispatchHandleHostReport(report, this)); - } - - public void handleHostReport(HostReport report, boolean isBoot) { - long startTime = System.currentTimeMillis(); - try { - long swapOut = 0; - if (report.getHost().getAttributesMap().containsKey("swapout")) { - swapOut = Integer.parseInt(report.getHost().getAttributesMap().get("swapout")); - if (swapOut > 0) - logger.info(report.getHost().getName() + " swapout: " - + report.getHost().getAttributesMap().get("swapout")); - } - - DispatchHost host; - RenderHost rhost = report.getHost(); - try { - host = hostManager.findDispatchHost(rhost.getName()); - hostManager.setHostStatistics(host, rhost.getTotalMem(), rhost.getFreeMem(), - rhost.getTotalSwap(), rhost.getFreeSwap(), rhost.getTotalMcp(), rhost.getFreeMcp(), - rhost.getTotalGpuMem(), rhost.getFreeGpuMem(), rhost.getLoad(), - new Timestamp(rhost.getBootTime() * 1000l), rhost.getAttributesMap().get("SP_OS")); - - // Both logics are conflicting, only change hardware state if - // there was no need for a tempDirStorage state change - if (!changeStateForTempDirStorage(host, report.getHost())) { - changeHardwareState(host, report.getHost().getState(), isBoot); - } - changeNimbyState(host, report.getHost()); - /** - * This should only happen at boot time or it will fight with the dispatcher over row locks. - */ - if (isBoot) { - hostManager.setHostResources(host, report); + /** + * Queues up the given boot report. + * + * @param report + */ + public void queueBootReport(BootReport report) { + if (isShutdown()) { + throw new RqdRetryReportException( + "Error processing host report. Cuebot not " + "accepting packets."); } + reportQueue.execute(new DispatchHandleHostReport(report, this)); + } - dispatchSupport.determineIdleCores(host, report.getHost().getLoad()); - } catch (DataAccessException dae) { - logger.info("Unable to find host " + rhost.getName() + "," + dae + " , creating host."); - // TODO: Skip adding it if the host name is over 30 characters - - host = hostManager.createHost(report); - } catch (Exception e) { - logger.warn("Error processing HostReport, " + e); - return; - } - - /* - * Verify all the frames in the report are valid. Frames that are not valid are removed. - */ - List runningFrames = verifyRunningFrameInfo(report); - - /* - * Updates memory usage for the proc, frames, jobs, and layers. And LLU time for the frames. - */ - updateMemoryUsageAndLluTime(runningFrames); - - /* - * kill frames that have over run. - */ - killTimedOutFrames(runningFrames, report.getHost().getName()); - - /* - * Prevent OOM (Out-Of-Memory) issues on the host and manage frame reserved memory - */ - handleMemoryUsage(host, report.getHost(), runningFrames); - - /* - * The checks are done in order of least CPU intensive to most CPU intensive, saving checks - * that hit the DB for last. - * - * These are done so we don't populate the booking queue with a bunch of hosts that can't be - * booked. 
- */ - String msg = null; - boolean hasLocalJob = bookingManager.hasLocalHostAssignment(host); - int coresToReserve = host.handleNegativeCoresRequirement(Dispatcher.CORE_POINTS_RESERVED_MIN); - - if (hasLocalJob) { - List lcas = bookingManager.getLocalHostAssignment(host); - for (LocalHostAssignment lca : lcas) { - bookingManager.removeInactiveLocalHostAssignment(lca); - } - } - long memReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - - if (!isTempDirStorageEnough(report.getHost().getTotalMcp(), report.getHost().getFreeMcp(), - host.getOs())) { - msg = String.format( - "%s doesn't have enough free space in the temporary directory (mcp), %dMB", host.name, - (report.getHost().getFreeMcp() / 1024)); - } else if (coresToReserve <= 0 || host.idleCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { - msg = String.format("%s doesn't have enough idle cores, %d needs %d", host.name, - host.idleCores, Dispatcher.CORE_POINTS_RESERVED_MIN); - } else if (host.idleMemory < memReservedMin) { - msg = String.format("%s doesn't have enough idle memory, %d needs %d", host.name, - host.idleMemory, memReservedMin); - } else if (report.getHost().getFreeMem() < CueUtil.MB512) { - msg = String.format("%s doesn't have enough free system mem, %d needs %d", host.name, - report.getHost().getFreeMem(), memReservedMin); - } else if (!host.hardwareState.equals(HardwareState.UP)) { - msg = host + " is not in the Up state."; - } else if (host.lockState.equals(LockState.LOCKED)) { - msg = host + " is locked."; - } else if (report.getHost().getNimbyLocked()) { - if (!hasLocalJob) { - msg = host + " is NIMBY locked."; - } - } else if (!dispatchSupport.isCueBookable(host)) { - msg = "The cue has no pending jobs"; - } - - /* - * If a message was set, the host is not bookable. Log the message and move on. - */ - if (msg != null) { - logger.trace(msg); - } else { - // check again. The dangling local host assignment could be removed. - hasLocalJob = bookingManager.hasLocalHostAssignment(host); - - /* - * Check to see if a local job has been assigned. - */ - if (hasLocalJob) { - if (!bookingManager.hasResourceDeficit(host)) { - bookingQueue.execute(new DispatchBookHostLocal(host, localDispatcher)); - } - return; + /** + * Queues up the given host report. + * + * @param report + */ + public void queueHostReport(HostReport report) { + if (isShutdown()) { + throw new RqdRetryReportException( + "Error processing host report. Cuebot not " + "accepting packets."); } + reportQueue.execute(new DispatchHandleHostReport(report, this)); + } - /* - * Check if the host prefers a show. If it does , dispatch to that show first. 
- */ - if (hostManager.isPreferShow(host)) { - bookingQueue.execute( - new DispatchBookHost(host, hostManager.getPreferredShow(host), dispatcher, env)); - return; + public void handleHostReport(HostReport report, boolean isBoot) { + long startTime = System.currentTimeMillis(); + try { + long swapOut = 0; + if (report.getHost().getAttributesMap().containsKey("swapout")) { + swapOut = Integer.parseInt(report.getHost().getAttributesMap().get("swapout")); + if (swapOut > 0) + logger.info(report.getHost().getName() + " swapout: " + + report.getHost().getAttributesMap().get("swapout")); + } + + DispatchHost host; + RenderHost rhost = report.getHost(); + try { + host = hostManager.findDispatchHost(rhost.getName()); + hostManager.setHostStatistics(host, rhost.getTotalMem(), rhost.getFreeMem(), + rhost.getTotalSwap(), rhost.getFreeSwap(), rhost.getTotalMcp(), + rhost.getFreeMcp(), rhost.getTotalGpuMem(), rhost.getFreeGpuMem(), + rhost.getLoad(), new Timestamp(rhost.getBootTime() * 1000l), + rhost.getAttributesMap().get("SP_OS")); + + // Both logics are conflicting, only change hardware state if + // there was no need for a tempDirStorage state change + if (!changeStateForTempDirStorage(host, report.getHost())) { + changeHardwareState(host, report.getHost().getState(), isBoot); + } + changeNimbyState(host, report.getHost()); + + /** + * This should only happen at boot time or it will fight with the dispatcher over + * row locks. + */ + if (isBoot) { + hostManager.setHostResources(host, report); + } + + dispatchSupport.determineIdleCores(host, report.getHost().getLoad()); + } catch (DataAccessException dae) { + logger.info( + "Unable to find host " + rhost.getName() + "," + dae + " , creating host."); + // TODO: Skip adding it if the host name is over 30 characters + + host = hostManager.createHost(report); + } catch (Exception e) { + logger.warn("Error processing HostReport, " + e); + return; + } + + /* + * Verify all the frames in the report are valid. Frames that are not valid are removed. + */ + List runningFrames = verifyRunningFrameInfo(report); + + /* + * Updates memory usage for the proc, frames, jobs, and layers. And LLU time for the + * frames. + */ + updateMemoryUsageAndLluTime(runningFrames); + + /* + * kill frames that have over run. + */ + killTimedOutFrames(runningFrames, report.getHost().getName()); + + /* + * Prevent OOM (Out-Of-Memory) issues on the host and manage frame reserved memory + */ + handleMemoryUsage(host, report.getHost(), runningFrames); + + /* + * The checks are done in order of least CPU intensive to most CPU intensive, saving + * checks that hit the DB for last. + * + * These are done so we don't populate the booking queue with a bunch of hosts that + * can't be booked. 
+ */ + String msg = null; + boolean hasLocalJob = bookingManager.hasLocalHostAssignment(host); + int coresToReserve = + host.handleNegativeCoresRequirement(Dispatcher.CORE_POINTS_RESERVED_MIN); + + if (hasLocalJob) { + List lcas = bookingManager.getLocalHostAssignment(host); + for (LocalHostAssignment lca : lcas) { + bookingManager.removeInactiveLocalHostAssignment(lca); + } + } + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + + if (!isTempDirStorageEnough(report.getHost().getTotalMcp(), + report.getHost().getFreeMcp(), host.getOs())) { + msg = String.format( + "%s doesn't have enough free space in the temporary directory (mcp), %dMB", + host.name, (report.getHost().getFreeMcp() / 1024)); + } else if (coresToReserve <= 0 + || host.idleCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { + msg = String.format("%s doesn't have enough idle cores, %d needs %d", host.name, + host.idleCores, Dispatcher.CORE_POINTS_RESERVED_MIN); + } else if (host.idleMemory < memReservedMin) { + msg = String.format("%s doesn't have enough idle memory, %d needs %d", host.name, + host.idleMemory, memReservedMin); + } else if (report.getHost().getFreeMem() < CueUtil.MB512) { + msg = String.format("%s doesn't have enough free system mem, %d needs %d", + host.name, report.getHost().getFreeMem(), memReservedMin); + } else if (!host.hardwareState.equals(HardwareState.UP)) { + msg = host + " is not in the Up state."; + } else if (host.lockState.equals(LockState.LOCKED)) { + msg = host + " is locked."; + } else if (report.getHost().getNimbyLocked()) { + if (!hasLocalJob) { + msg = host + " is NIMBY locked."; + } + } else if (!dispatchSupport.isCueBookable(host)) { + msg = "The cue has no pending jobs"; + } + + /* + * If a message was set, the host is not bookable. Log the message and move on. + */ + if (msg != null) { + logger.trace(msg); + } else { + // check again. The dangling local host assignment could be removed. + hasLocalJob = bookingManager.hasLocalHostAssignment(host); + + /* + * Check to see if a local job has been assigned. + */ + if (hasLocalJob) { + if (!bookingManager.hasResourceDeficit(host)) { + bookingQueue.execute(new DispatchBookHostLocal(host, localDispatcher)); + } + return; + } + + /* + * Check if the host prefers a show. If it does , dispatch to that show first. + */ + if (hostManager.isPreferShow(host)) { + bookingQueue.execute(new DispatchBookHost(host, + hostManager.getPreferredShow(host), dispatcher, env)); + return; + } + + bookingQueue.execute(new DispatchBookHost(host, dispatcher, env)); + } + } finally { + if (reportQueue.getQueue().size() > 0 || System.currentTimeMillis() - startTime > 100) { + /* + * Write a log if the host report takes a long time to process. + */ + CueUtil.logDuration(startTime, + "host report " + report.getHost().getName() + " with " + + report.getFramesCount() + " running frames, waiting: " + + reportQueue.getQueue().size()); + } } - - bookingQueue.execute(new DispatchBookHost(host, dispatcher, env)); - } - } finally { - if (reportQueue.getQueue().size() > 0 || System.currentTimeMillis() - startTime > 100) { - /* - * Write a log if the host report takes a long time to process. 
- */ - CueUtil.logDuration(startTime, - "host report " + report.getHost().getName() + " with " + report.getFramesCount() - + " running frames, waiting: " + reportQueue.getQueue().size()); - } } - } - - /** - * Check if a reported temp storage size and availability is enough for running a job - * - * Use dispatcher.min_available_temp_storage_percentage (opencue.properties) to define what's the - * accepted threshold. Providing hostOs is necessary as this feature is currently not available on - * Windows hosts - * - * @param tempTotalStorage Total storage on the temp directory - * @param tempFreeStorage Free storage on the temp directory - * @param hostOs Reported operational systems - * @return - */ - private boolean isTempDirStorageEnough(Long tempTotalStorage, Long tempFreeStorage, - String[] hostOs) { - // The minimum amount of free space in the temporary directory to book a host - int minAvailableTempPercentage = - env.getRequiredProperty("dispatcher.min_available_temp_storage_percentage", Integer.class); - - return (minAvailableTempPercentage == -1 || - // It is safe to assume multiple OSs imply windows is not the base OS, - // threfore Windows will always report a single hostOs - (hostOs.length == 1 && hostOs[0].equalsIgnoreCase(WINDOWS_OS)) - || (((tempFreeStorage * 100.0) / tempTotalStorage) >= minAvailableTempPercentage)); - } - - /** - * Update the hardware state property. - * - * If a host pings in with a different hardware state than what is currently in the DB, the state - * is updated. If the hardware state is Rebooting or RebootWhenIdle, then state can only be - * updated with a boot report. If the state is Repair, then state is never updated via RQD. - * - * @param host - * @param reportState - * @param isBoot - */ - private void changeHardwareState(DispatchHost host, HardwareState reportState, boolean isBoot) { - // If the states are the same there is no reason to do this update. - if (host.hardwareState.equals(reportState)) { - return; + + /** + * Check if a reported temp storage size and availability is enough for running a job + * + * Use dispatcher.min_available_temp_storage_percentage (opencue.properties) to define what's + * the accepted threshold. 
Providing hostOs is necessary as this feature is currently not + * available on Windows hosts + * + * @param tempTotalStorage Total storage on the temp directory + * @param tempFreeStorage Free storage on the temp directory + * @param hostOs Reported operational systems + * @return + */ + private boolean isTempDirStorageEnough(Long tempTotalStorage, Long tempFreeStorage, + String[] hostOs) { + // The minimum amount of free space in the temporary directory to book a host + int minAvailableTempPercentage = env.getRequiredProperty( + "dispatcher.min_available_temp_storage_percentage", Integer.class); + + return (minAvailableTempPercentage == -1 || + // It is safe to assume multiple OSs imply windows is not the base OS, + // threfore Windows will always report a single hostOs + (hostOs.length == 1 && hostOs[0].equalsIgnoreCase(WINDOWS_OS)) + || (((tempFreeStorage * 100.0) / tempTotalStorage) >= minAvailableTempPercentage)); } - switch (host.hardwareState) { - case DOWN: - hostManager.setHostState(host, HardwareState.UP); - host.hardwareState = HardwareState.UP; - break; - case REBOOTING: - case REBOOT_WHEN_IDLE: - // Rebooting hosts only change to UP when processing a boot report - if (isBoot) { - hostManager.setHostState(host, HardwareState.UP); - host.hardwareState = HardwareState.UP; + /** + * Update the hardware state property. + * + * If a host pings in with a different hardware state than what is currently in the DB, the + * state is updated. If the hardware state is Rebooting or RebootWhenIdle, then state can only + * be updated with a boot report. If the state is Repair, then state is never updated via RQD. + * + * @param host + * @param reportState + * @param isBoot + */ + private void changeHardwareState(DispatchHost host, HardwareState reportState, boolean isBoot) { + // If the states are the same there is no reason to do this update. + if (host.hardwareState.equals(reportState)) { + return; + } + + switch (host.hardwareState) { + case DOWN: + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + break; + case REBOOTING: + case REBOOT_WHEN_IDLE: + // Rebooting hosts only change to UP when processing a boot report + if (isBoot) { + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + } + break; + case REPAIR: + // Do not change the state of the host if its in a repair state. + break; + default: + hostManager.setHostState(host, reportState); + host.hardwareState = reportState; + break; } - break; - case REPAIR: - // Do not change the state of the host if its in a repair state. - break; - default: - hostManager.setHostState(host, reportState); - host.hardwareState = reportState; - break; - } - } - - /** - * Prevent cue frames from booking on hosts with full temporary directories. - * - * Change host state to REPAIR or UP according to the amount of free space in the temporary - * directory: - Set the host state to REPAIR, when the amount of free space in the temporary - * directory is less than the minimum required. 
- Set the host state to UP, when the amount of - * free space in the temporary directory is greater or equal to the minimum required and the host - * has a comment with subject: SUBJECT_COMMENT_FULL_TEMP_DIR - * - * @param host - * @param reportHost - * @return - */ - private boolean changeStateForTempDirStorage(DispatchHost host, RenderHost reportHost) { - // The minimum amount of free space in the temporary directory to book a host - int minAvailableTempPercentage = - env.getRequiredProperty("dispatcher.min_available_temp_storage_percentage", Integer.class); - - // Prevent cue frames from booking on hosts with full temporary directories - boolean hasEnoughTempStorage = - isTempDirStorageEnough(reportHost.getTotalMcp(), reportHost.getFreeMcp(), host.getOs()); - if (!hasEnoughTempStorage && host.hardwareState == HardwareState.UP) { - // Insert a comment indicating that the Host status = Repair with reason = Full - // temporary directory - CommentDetail c = new CommentDetail(); - c.subject = SUBJECT_COMMENT_FULL_TEMP_DIR; - c.user = CUEBOT_COMMENT_USER; - c.timestamp = null; - long requiredTempMb = - (long) (((minAvailableTempPercentage / 100.0) * reportHost.getTotalMcp()) / 1024); - c.message = - "Host " + host.getName() + " marked as REPAIR. The current amount of free space in the " - + "temporary directory (mcp) is " + (reportHost.getFreeMcp() / 1024) - + "MB. It must have at least " + ((requiredTempMb)) - + "MB of free space in temporary directory"; - commentManager.addComment(host, c); - - // Set the host state to REPAIR - hostManager.setHostState(host, HardwareState.REPAIR); - host.hardwareState = HardwareState.REPAIR; - - return true; - } else if (hasEnoughTempStorage && host.hardwareState == HardwareState.REPAIR) { - // Check if the host with REPAIR status has comments with - // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and - // user=CUEBOT_COMMENT_USER and delete the comments, if they exist - boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, - CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); - - if (commentsDeleted) { - // Set the host state to UP - hostManager.setHostState(host, HardwareState.UP); - host.hardwareState = HardwareState.UP; - return true; - } - } - return false; - } - - /** - * Changes the NIMBY lock state. If the DB indicates a NIMBY lock but RQD does not, then the host - * is unlocked. If the DB indicates the host is not locked but RQD indicates it is, the host is - * locked. - * - * @param host - * @param rh - */ - private void changeNimbyState(DispatchHost host, RenderHost rh) { - if (rh.getNimbyLocked()) { - if (host.lockState.equals(LockState.OPEN)) { - host.lockState = LockState.NIMBY_LOCKED; - hostManager.setHostLock(host, LockState.NIMBY_LOCKED, new Source("NIMBY")); - } - } else { - if (host.lockState.equals(LockState.NIMBY_LOCKED)) { - host.lockState = LockState.OPEN; - hostManager.setHostLock(host, LockState.OPEN, new Source("NIMBY")); - } - } - } - - /** - * Changes the Lock state of the host. Looks at the number of locked cores and sets host to locked - * if all cores are locked. 
- * - * @param host DispatchHost - * @param coreInfo CoreDetail - */ - private void changeLockState(DispatchHost host, CoreDetail coreInfo) { - if (host.lockState == LockState.LOCKED) { - if (coreInfo.getLockedCores() < coreInfo.getTotalCores()) { - host.lockState = LockState.OPEN; - hostManager.setHostLock(host, LockState.OPEN, new Source("cores")); - } - } else if (coreInfo.getLockedCores() >= coreInfo.getTotalCores()) { - host.lockState = LockState.LOCKED; - hostManager.setHostLock(host, LockState.LOCKED, new Source("cores")); } - } - - /** - * Prevent host from entering an OOM state where oom-killer might start killing important OS - * processes and frames start using SWAP memory The kill logic will kick in one of the following - * conditions is met: - Host has less than oom_max_safe_used_physical_memory_threshold memory - * available and less than oom_max_safe_used_swap_memory_threshold swap available - A frame is - * taking more than OOM_FRAME_OVERBOARD_PERCENT of what it had reserved For frames that are using - * more than they had reserved but not above the threshold, negotiate expanding the reservations - * with other frames on the same host - * - * @param dispatchHost - * @param report - */ - private void handleMemoryUsage(final DispatchHost dispatchHost, RenderHost renderHost, - List runningFrames) { - // Don't keep memory balances on nimby hosts and hosts with invalid memory - // information - if (dispatchHost.isNimby || renderHost.getTotalMem() <= 0) { - return; + + /** + * Prevent cue frames from booking on hosts with full temporary directories. + * + * Change host state to REPAIR or UP according to the amount of free space in the temporary + * directory: - Set the host state to REPAIR, when the amount of free space in the temporary + * directory is less than the minimum required. - Set the host state to UP, when the amount of + * free space in the temporary directory is greater or equal to the minimum required and the + * host has a comment with subject: SUBJECT_COMMENT_FULL_TEMP_DIR + * + * @param host + * @param reportHost + * @return + */ + private boolean changeStateForTempDirStorage(DispatchHost host, RenderHost reportHost) { + // The minimum amount of free space in the temporary directory to book a host + int minAvailableTempPercentage = env.getRequiredProperty( + "dispatcher.min_available_temp_storage_percentage", Integer.class); + + // Prevent cue frames from booking on hosts with full temporary directories + boolean hasEnoughTempStorage = isTempDirStorageEnough(reportHost.getTotalMcp(), + reportHost.getFreeMcp(), host.getOs()); + if (!hasEnoughTempStorage && host.hardwareState == HardwareState.UP) { + // Insert a comment indicating that the Host status = Repair with reason = Full + // temporary directory + CommentDetail c = new CommentDetail(); + c.subject = SUBJECT_COMMENT_FULL_TEMP_DIR; + c.user = CUEBOT_COMMENT_USER; + c.timestamp = null; + long requiredTempMb = + (long) (((minAvailableTempPercentage / 100.0) * reportHost.getTotalMcp()) + / 1024); + c.message = "Host " + host.getName() + + " marked as REPAIR. The current amount of free space in the " + + "temporary directory (mcp) is " + (reportHost.getFreeMcp() / 1024) + + "MB. 
It must have at least " + ((requiredTempMb)) + + "MB of free space in temporary directory"; + commentManager.addComment(host, c); + + // Set the host state to REPAIR + hostManager.setHostState(host, HardwareState.REPAIR); + host.hardwareState = HardwareState.REPAIR; + + return true; + } else if (hasEnoughTempStorage && host.hardwareState == HardwareState.REPAIR) { + // Check if the host with REPAIR status has comments with + // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and + // user=CUEBOT_COMMENT_USER and delete the comments, if they exist + boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + + if (commentsDeleted) { + // Set the host state to UP + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + return true; + } + } + return false; } - final double OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD = env.getRequiredProperty( - "dispatcher.oom_max_safe_used_physical_memory_threshold", Double.class); - final double OOM_MAX_SAFE_USED_SWAP_THRESHOLD = - env.getRequiredProperty("dispatcher.oom_max_safe_used_swap_memory_threshold", Double.class); - final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = - env.getRequiredProperty("dispatcher.oom_frame_overboard_allowed_threshold", Double.class); - - Double physMemoryUsageRatio = renderHost.getTotalMem() > 0 - ? 1.0 - renderHost.getFreeMem() / (double) renderHost.getTotalMem() - : 0.0; - - Double swapMemoryUsageRatio = renderHost.getTotalSwap() > 0 - ? 1.0 - renderHost.getFreeSwap() / (double) renderHost.getTotalSwap() - : 0.0; - - // If checking for the swap threshold has been disabled, only memory usage is - // taken into consideration. - // If checking for memory has been disabled, checking for swap isolated is not - // safe, therefore disabled - boolean memoryWarning = false; - if (OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && OOM_MAX_SAFE_USED_SWAP_THRESHOLD > 0.0 - && !physMemoryUsageRatio.isNaN() && !swapMemoryUsageRatio.isNaN()) { - memoryWarning = physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD - && swapMemoryUsageRatio > OOM_MAX_SAFE_USED_SWAP_THRESHOLD; - } else if (OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && !physMemoryUsageRatio.isNaN()) { - memoryWarning = physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD; + /** + * Changes the NIMBY lock state. If the DB indicates a NIMBY lock but RQD does not, then the + * host is unlocked. If the DB indicates the host is not locked but RQD indicates it is, the + * host is locked. 
+ * + * @param host + * @param rh + */ + private void changeNimbyState(DispatchHost host, RenderHost rh) { + if (rh.getNimbyLocked()) { + if (host.lockState.equals(LockState.OPEN)) { + host.lockState = LockState.NIMBY_LOCKED; + hostManager.setHostLock(host, LockState.NIMBY_LOCKED, new Source("NIMBY")); + } + } else { + if (host.lockState.equals(LockState.NIMBY_LOCKED)) { + host.lockState = LockState.OPEN; + hostManager.setHostLock(host, LockState.OPEN, new Source("NIMBY")); + } + } } - if (memoryWarning) { - logger.warn("Memory warning(" + renderHost.getName() + "): physMemoryRatio: " - + physMemoryUsageRatio + ", swapRatio: " + swapMemoryUsageRatio); - // Try to kill frames using swap memory as they are probably performing poorly - long swapUsed = renderHost.getTotalSwap() - renderHost.getFreeSwap(); - long maxSwapUsageAllowed = - (long) (renderHost.getTotalSwap() * OOM_MAX_SAFE_USED_SWAP_THRESHOLD); - - // Sort runningFrames bassed on how much swap they are using - runningFrames.sort(Comparator - .comparingLong((RunningFrameInfo frame) -> frame.getUsedSwapMemory()).reversed()); - - int killAttemptsRemaining = 5; - for (RunningFrameInfo frame : runningFrames) { - // Reached the first frame on the sorted list without swap usage - if (frame.getUsedSwapMemory() <= 0) { - break; + /** + * Changes the Lock state of the host. Looks at the number of locked cores and sets host to + * locked if all cores are locked. + * + * @param host DispatchHost + * @param coreInfo CoreDetail + */ + private void changeLockState(DispatchHost host, CoreDetail coreInfo) { + if (host.lockState == LockState.LOCKED) { + if (coreInfo.getLockedCores() < coreInfo.getTotalCores()) { + host.lockState = LockState.OPEN; + hostManager.setHostLock(host, LockState.OPEN, new Source("cores")); + } + } else if (coreInfo.getLockedCores() >= coreInfo.getTotalCores()) { + host.lockState = LockState.LOCKED; + hostManager.setHostLock(host, LockState.LOCKED, new Source("cores")); } - if (killProcForMemory(frame.getFrameId(), renderHost.getName(), KillCause.HostUnderOom)) { - swapUsed -= frame.getUsedSwapMemory(); - logger.info("Memory warning(" + renderHost.getName() + "): " + "Killing frame on " - + frame.getJobName() + "." 
+ frame.getFrameName() + ", using too much swap."); + } + + /** + * Prevent host from entering an OOM state where oom-killer might start killing important OS + * processes and frames start using SWAP memory The kill logic will kick in one of the following + * conditions is met: - Host has less than oom_max_safe_used_physical_memory_threshold memory + * available and less than oom_max_safe_used_swap_memory_threshold swap available - A frame is + * taking more than OOM_FRAME_OVERBOARD_PERCENT of what it had reserved For frames that are + * using more than they had reserved but not above the threshold, negotiate expanding the + * reservations with other frames on the same host + * + * @param dispatchHost + * @param report + */ + private void handleMemoryUsage(final DispatchHost dispatchHost, RenderHost renderHost, + List runningFrames) { + // Don't keep memory balances on nimby hosts and hosts with invalid memory + // information + if (dispatchHost.isNimby || renderHost.getTotalMem() <= 0) { + return; } - killAttemptsRemaining -= 1; - if (killAttemptsRemaining <= 0 || swapUsed <= maxSwapUsageAllowed) { - break; + final double OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD = env.getRequiredProperty( + "dispatcher.oom_max_safe_used_physical_memory_threshold", Double.class); + final double OOM_MAX_SAFE_USED_SWAP_THRESHOLD = env.getRequiredProperty( + "dispatcher.oom_max_safe_used_swap_memory_threshold", Double.class); + final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = env.getRequiredProperty( + "dispatcher.oom_frame_overboard_allowed_threshold", Double.class); + + Double physMemoryUsageRatio = renderHost.getTotalMem() > 0 + ? 1.0 - renderHost.getFreeMem() / (double) renderHost.getTotalMem() + : 0.0; + + Double swapMemoryUsageRatio = renderHost.getTotalSwap() > 0 + ? 1.0 - renderHost.getFreeSwap() / (double) renderHost.getTotalSwap() + : 0.0; + + // If checking for the swap threshold has been disabled, only memory usage is + // taken into consideration. + // If checking for memory has been disabled, checking for swap isolated is not + // safe, therefore disabled + boolean memoryWarning = false; + if (OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && OOM_MAX_SAFE_USED_SWAP_THRESHOLD > 0.0 + && !physMemoryUsageRatio.isNaN() && !swapMemoryUsageRatio.isNaN()) { + memoryWarning = physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD + && swapMemoryUsageRatio > OOM_MAX_SAFE_USED_SWAP_THRESHOLD; + } else if (OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD > 0.0 && !physMemoryUsageRatio.isNaN()) { + memoryWarning = physMemoryUsageRatio > OOM_MAX_SAFE_USED_PHYSICAL_THRESHOLD; } - } - } else { - // When no mass cleaning was required, check for frames going overboard - // if frames didn't go overboard, manage its reservations trying to increase - // them accordingly - for (final RunningFrameInfo frame : runningFrames) { - if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD > 0 && isFrameOverboard(frame)) { - if (!killFrameOverusingMemory(frame, dispatchHost.getName())) { - logger.warn("Frame " + frame.getJobName() + "." 
+ frame.getFrameName() - + " is overboard but could not be killed"); - } + + if (memoryWarning) { + logger.warn("Memory warning(" + renderHost.getName() + "): physMemoryRatio: " + + physMemoryUsageRatio + ", swapRatio: " + swapMemoryUsageRatio); + // Try to kill frames using swap memory as they are probably performing poorly + long swapUsed = renderHost.getTotalSwap() - renderHost.getFreeSwap(); + long maxSwapUsageAllowed = + (long) (renderHost.getTotalSwap() * OOM_MAX_SAFE_USED_SWAP_THRESHOLD); + + // Sort runningFrames bassed on how much swap they are using + runningFrames.sort( + Comparator.comparingLong((RunningFrameInfo frame) -> frame.getUsedSwapMemory()) + .reversed()); + + int killAttemptsRemaining = 5; + for (RunningFrameInfo frame : runningFrames) { + // Reached the first frame on the sorted list without swap usage + if (frame.getUsedSwapMemory() <= 0) { + break; + } + if (killProcForMemory(frame.getFrameId(), renderHost.getName(), + KillCause.HostUnderOom)) { + swapUsed -= frame.getUsedSwapMemory(); + logger.info("Memory warning(" + renderHost.getName() + "): " + + "Killing frame on " + frame.getJobName() + "." + frame.getFrameName() + + ", using too much swap."); + } + + killAttemptsRemaining -= 1; + if (killAttemptsRemaining <= 0 || swapUsed <= maxSwapUsageAllowed) { + break; + } + } } else { - handleMemoryReservations(frame); + // When no mass cleaning was required, check for frames going overboard + // if frames didn't go overboard, manage its reservations trying to increase + // them accordingly + for (final RunningFrameInfo frame : runningFrames) { + if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD > 0 && isFrameOverboard(frame)) { + if (!killFrameOverusingMemory(frame, dispatchHost.getName())) { + logger.warn("Frame " + frame.getJobName() + "." + frame.getFrameName() + + " is overboard but could not be killed"); + } + } else { + handleMemoryReservations(frame); + } + } } - } } - } - public enum KillCause { - FrameOverboard("This frame is using more memory than it had reserved."), HostUnderOom( - "Frame killed by host under OOM pressure"), FrameTimedOut( - "Frame timed out"), FrameLluTimedOut("Frame LLU timed out"), FrameVerificationFailure( - "Frame failed to be verified on the database"); + public enum KillCause { + FrameOverboard("This frame is using more memory than it had reserved."), HostUnderOom( + "Frame killed by host under OOM pressure"), FrameTimedOut( + "Frame timed out"), FrameLluTimedOut( + "Frame LLU timed out"), FrameVerificationFailure( + "Frame failed to be verified on the database"); - private final String message; + private final String message; - private KillCause(String message) { - this.message = message; - } + private KillCause(String message) { + this.message = message; + } - @Override - public String toString() { - return message; + @Override + public String toString() { + return message; + } } - } - private boolean killFrameOverusingMemory(RunningFrameInfo frame, String hostname) { - try { - VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); - - // Don't mess with localDispatch procs - if (proc.isLocalDispatch) { - return false; - } - boolean killed = killProcForMemory(proc.frameId, hostname, KillCause.FrameOverboard); - if (killed) { - logger.info("Killing frame on " + frame.getJobName() + "." 
+ frame.getFrameName() - + ", using too much memory."); - } - return killed; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - private boolean getKillClearance(String hostname, String frameId) { - String cacheKey = hostname + "-" + frameId; - final int FRAME_KILL_RETRY_LIMIT = - env.getRequiredProperty("dispatcher.frame_kill_retry_limit", Integer.class); - - // Cache frame+host receiving a killRequest and count how many times the request - // is being retried - // meaning rqd is probably failing at attempting to kill the related proc - long cachedCount; - try { - cachedCount = 1 + killRequestCounterCache.get(cacheKey, () -> 0L); - } catch (ExecutionException e) { - return false; - } - killRequestCounterCache.put(cacheKey, cachedCount); - if (cachedCount > FRAME_KILL_RETRY_LIMIT) { - // If the kill retry limit has been reached, notify prometheus of the issue and - // give up - if (!dispatcher.isTestMode()) { + private boolean killFrameOverusingMemory(RunningFrameInfo frame, String hostname) { try { - FrameInterface frame = jobManager.getFrame(frameId); - JobInterface job = jobManager.getJob(frame.getJobId()); - prometheusMetrics.incrementFrameKillFailureCounter(hostname, job.getName(), - frame.getName(), frameId); + VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); + + // Don't mess with localDispatch procs + if (proc.isLocalDispatch) { + return false; + } + boolean killed = killProcForMemory(proc.frameId, hostname, KillCause.FrameOverboard); + if (killed) { + logger.info("Killing frame on " + frame.getJobName() + "." + frame.getFrameName() + + ", using too much memory."); + } + return killed; } catch (EmptyResultDataAccessException e) { - logger.info("Trying to kill a frame that no longer exists: host=" + hostname + " frameId=" - + frameId); + return false; } - } - return false; } - return true; - } - private boolean killProcForMemory(String frameId, String hostname, KillCause killCause) { - if (!getKillClearance(hostname, frameId)) { - return false; - } + private boolean getKillClearance(String hostname, String frameId) { + String cacheKey = hostname + "-" + frameId; + final int FRAME_KILL_RETRY_LIMIT = + env.getRequiredProperty("dispatcher.frame_kill_retry_limit", Integer.class); - FrameInterface frame = jobManager.getFrame(frameId); - if (dispatcher.isTestMode()) { - // Different threads don't share the same database state on the test environment - (new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), rqdClient, - dispatchSupport, dispatcher.isTestMode())).run(); - } else { - try { - killQueue.execute(new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), - rqdClient, dispatchSupport, dispatcher.isTestMode())); - prometheusMetrics.incrementFrameKilledCounter(hostname, killCause); - } catch (TaskRejectedException e) { - logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); - return false; - } + // Cache frame+host receiving a killRequest and count how many times the request + // is being retried + // meaning rqd is probably failing at attempting to kill the related proc + long cachedCount; + try { + cachedCount = 1 + killRequestCounterCache.get(cacheKey, () -> 0L); + } catch (ExecutionException e) { + return false; + } + killRequestCounterCache.put(cacheKey, cachedCount); + if (cachedCount > FRAME_KILL_RETRY_LIMIT) { + // If the kill retry limit has been reached, notify prometheus of the issue and + // give up + if (!dispatcher.isTestMode()) { + try { + FrameInterface frame = 
jobManager.getFrame(frameId); + JobInterface job = jobManager.getJob(frame.getJobId()); + prometheusMetrics.incrementFrameKillFailureCounter(hostname, job.getName(), + frame.getName(), frameId); + } catch (EmptyResultDataAccessException e) { + logger.info("Trying to kill a frame that no longer exists: host=" + hostname + + " frameId=" + frameId); + } + } + return false; + } + return true; } - DispatchSupport.killedOffenderProcs.incrementAndGet(); - return true; - } - private boolean killFrame(String frameId, String hostname, KillCause killCause) { - if (!getKillClearance(hostname, frameId)) { - return false; - } + private boolean killProcForMemory(String frameId, String hostname, KillCause killCause) { + if (!getKillClearance(hostname, frameId)) { + return false; + } - if (dispatcher.isTestMode()) { - // Different threads don't share the same database state on the test environment - (new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), rqdClient)).run(); - } else { - try { - killQueue - .execute(new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), rqdClient)); - prometheusMetrics.incrementFrameKilledCounter(hostname, killCause); - } catch (TaskRejectedException e) { - logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); - } - } - DispatchSupport.killedOffenderProcs.incrementAndGet(); - return true; - } - - /** - * Check frame memory usage comparing the amount used with the amount it had reserved - * - * @param frame - * @return - */ - private boolean isFrameOverboard(final RunningFrameInfo frame) { - final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = - env.getRequiredProperty("dispatcher.oom_frame_overboard_allowed_threshold", Double.class); - - if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD < 0) { - return false; + FrameInterface frame = jobManager.getFrame(frameId); + if (dispatcher.isTestMode()) { + // Different threads don't share the same database state on the test environment + (new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), rqdClient, + dispatchSupport, dispatcher.isTestMode())).run(); + } else { + try { + killQueue.execute(new DispatchRqdKillFrameMemory(hostname, frame, + killCause.toString(), rqdClient, dispatchSupport, dispatcher.isTestMode())); + prometheusMetrics.incrementFrameKilledCounter(hostname, killCause); + } catch (TaskRejectedException e) { + logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); + return false; + } + } + DispatchSupport.killedOffenderProcs.incrementAndGet(); + return true; } - double rss = (double) frame.getRss(); - double maxRss = (double) frame.getMaxRss(); - final double MAX_RSS_OVERBOARD_THRESHOLD = OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD * 2; - final double RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER = 0.1; - - try { - VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); - double reserved = (double) proc.memoryReserved; + private boolean killFrame(String frameId, String hostname, KillCause killCause) { + if (!getKillClearance(hostname, frameId)) { + return false; + } - // Last memory report is higher than the threshold - if (isOverboard(rss, reserved, OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD)) { + if (dispatcher.isTestMode()) { + // Different threads don't share the same database state on the test environment + (new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), rqdClient)).run(); + } else { + try { + killQueue.execute(new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), + rqdClient)); + 
prometheusMetrics.incrementFrameKilledCounter(hostname, killCause); + } catch (TaskRejectedException e) { + logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); + } + } + DispatchSupport.killedOffenderProcs.incrementAndGet(); return true; - } - // If rss is not overboard, handle the situation where the frame might be going - // overboard from - // time to time but the last report wasn't during a spike. For this case, - // consider a combination - // of rss and maxRss. maxRss > 2 * threshold and rss > 0.9 - else { - return (isOverboard(maxRss, reserved, MAX_RSS_OVERBOARD_THRESHOLD) - && isOverboard(rss, reserved, -RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER)); - } - } catch (EmptyResultDataAccessException e) { - logger.info( - "HostReportHandler(isFrameOverboard): Virtual proc for frame " + frame.getFrameName() - + " on job " + frame.getJobName() + " doesn't exist on the database"); - // Not able to mark the frame overboard is it couldn't be found on the db. - // Proc accounting (verifyRunningProc) should take care of it - return false; } - } - - private boolean isOverboard(double value, double total, double threshold) { - return value / total >= (1 + threshold); - } - - /** - * Handle memory reservations for the given frame - * - * @param frame - */ - private void handleMemoryReservations(final RunningFrameInfo frame) { - VirtualProc proc = null; - try { - proc = hostManager.getVirtualProc(frame.getResourceId()); - - if (proc.isLocalDispatch) { - return; - } - - if (dispatchSupport.increaseReservedMemory(proc, frame.getRss())) { - proc.memoryReserved = frame.getRss(); - logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() - + " increased its reserved memory to " + CueUtil.KbToMb(frame.getRss())); - } - } catch (ResourceReservationFailureException e) { - if (proc != null) { - long memNeeded = frame.getRss() - proc.memoryReserved; - logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() - + "was unable to reserve an additional " + CueUtil.KbToMb(memNeeded) + "on proc " - + proc.getName() + ", " + e); + + /** + * Check frame memory usage comparing the amount used with the amount it had reserved + * + * @param frame + * @return + */ + private boolean isFrameOverboard(final RunningFrameInfo frame) { + final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = env.getRequiredProperty( + "dispatcher.oom_frame_overboard_allowed_threshold", Double.class); + + if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD < 0) { + return false; + } + + double rss = (double) frame.getRss(); + double maxRss = (double) frame.getMaxRss(); + final double MAX_RSS_OVERBOARD_THRESHOLD = OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD * 2; + final double RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER = 0.1; + try { - if (dispatchSupport.balanceReservedMemory(proc, memNeeded)) { - proc.memoryReserved = frame.getRss(); - logger.info("was able to balance host: " + proc.getName()); - } else { - logger.info("failed to balance host: " + proc.getName()); - } - } catch (Exception ex) { - logger.warn("failed to balance host: " + proc.getName() + ", " + e); + VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); + double reserved = (double) proc.memoryReserved; + + // Last memory report is higher than the threshold + if (isOverboard(rss, reserved, OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD)) { + return true; + } + // If rss is not overboard, handle the situation where the frame might be going + // overboard from + // time to time but the last report wasn't during a spike. 
For this case, + // consider a combination + // of rss and maxRss. maxRss > 2 * threshold and rss > 0.9 + else { + return (isOverboard(maxRss, reserved, MAX_RSS_OVERBOARD_THRESHOLD) + && isOverboard(rss, reserved, -RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER)); + } + } catch (EmptyResultDataAccessException e) { + logger.info("HostReportHandler(isFrameOverboard): Virtual proc for frame " + + frame.getFrameName() + " on job " + frame.getJobName() + + " doesn't exist on the database"); + // Not able to mark the frame overboard is it couldn't be found on the db. + // Proc accounting (verifyRunningProc) should take care of it + return false; } - } else { - logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() - + "was unable to reserve an additional memory. Proc could not be found"); - } - } catch (EmptyResultDataAccessException e) { - logger.info("HostReportHandler: Memory reservations for frame " + frame.getFrameName() - + " on job " + frame.getJobName() + " proc could not be found"); } - } - - /** - * Kill frames that over run. - * - * @param rFrames - */ - private void killTimedOutFrames(List runningFrames, String hostname) { - for (RunningFrameInfo frame : runningFrames) { - String layerId = frame.getLayerId(); - - try { - LayerDetail layer = layerDao.getLayerDetail(layerId); - long runtimeMinutes = ((System.currentTimeMillis() - frame.getStartTime()) / 1000l) / 60; - - if (layer.timeout != 0 && runtimeMinutes > layer.timeout) { - killFrame(frame.getFrameId(), hostname, KillCause.FrameTimedOut); - } else if (layer.timeout_llu != 0 && frame.getLluTime() != 0) { - long r = System.currentTimeMillis() / 1000; - long lastUpdate = (r - frame.getLluTime()) / 60; - - if (layer.timeout_llu != 0 && lastUpdate > (layer.timeout_llu - 1)) { - killFrame(frame.getFrameId(), hostname, KillCause.FrameLluTimedOut); - } - } - } catch (EmptyResultDataAccessException e) { - logger.info("Unable to get layer with id=" + layerId); - } + + private boolean isOverboard(double value, double total, double threshold) { + return value / total >= (1 + threshold); } - } - - /** - * Update memory usage and LLU time for the given list of frames. 
- * - * @param rFrames - */ - private void updateMemoryUsageAndLluTime(List rFrames) { - for (RunningFrameInfo rf : rFrames) { - FrameInterface frame = jobManager.getFrame(rf.getFrameId()); - - dispatchSupport.updateFrameMemoryUsageAndLluTime(frame, rf.getRss(), rf.getMaxRss(), - rf.getLluTime()); - - dispatchSupport.updateProcMemoryUsage(frame, rf.getRss(), rf.getMaxRss(), rf.getVsize(), - rf.getMaxVsize(), rf.getUsedGpuMemory(), rf.getMaxUsedGpuMemory(), rf.getUsedSwapMemory(), - rf.getChildren().toByteArray()); + + /** + * Handle memory reservations for the given frame + * + * @param frame + */ + private void handleMemoryReservations(final RunningFrameInfo frame) { + VirtualProc proc = null; + try { + proc = hostManager.getVirtualProc(frame.getResourceId()); + + if (proc.isLocalDispatch) { + return; + } + + if (dispatchSupport.increaseReservedMemory(proc, frame.getRss())) { + proc.memoryReserved = frame.getRss(); + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + " increased its reserved memory to " + CueUtil.KbToMb(frame.getRss())); + } + } catch (ResourceReservationFailureException e) { + if (proc != null) { + long memNeeded = frame.getRss() - proc.memoryReserved; + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + "was unable to reserve an additional " + CueUtil.KbToMb(memNeeded) + + "on proc " + proc.getName() + ", " + e); + try { + if (dispatchSupport.balanceReservedMemory(proc, memNeeded)) { + proc.memoryReserved = frame.getRss(); + logger.info("was able to balance host: " + proc.getName()); + } else { + logger.info("failed to balance host: " + proc.getName()); + } + } catch (Exception ex) { + logger.warn("failed to balance host: " + proc.getName() + ", " + e); + } + } else { + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + "was unable to reserve an additional memory. Proc could not be found"); + } + } catch (EmptyResultDataAccessException e) { + logger.info("HostReportHandler: Memory reservations for frame " + frame.getFrameName() + + " on job " + frame.getJobName() + " proc could not be found"); + } } - updateJobMemoryUsage(rFrames); - updateLayerMemoryUsage(rFrames); - } - - /** - * Update job memory using for the given list of frames. - * - * @param frames - */ - private void updateJobMemoryUsage(List frames) { - final Map jobs = new HashMap(frames.size()); - - for (RunningFrameInfo frame : frames) { - JobEntity job = new JobEntity(frame.getJobId()); - if (jobs.containsKey(job)) { - if (jobs.get(job) < frame.getMaxRss()) { - jobs.put(job, frame.getMaxRss()); + /** + * Kill frames that over run. 
+ * + * @param rFrames + */ + private void killTimedOutFrames(List runningFrames, String hostname) { + for (RunningFrameInfo frame : runningFrames) { + String layerId = frame.getLayerId(); + + try { + LayerDetail layer = layerDao.getLayerDetail(layerId); + long runtimeMinutes = + ((System.currentTimeMillis() - frame.getStartTime()) / 1000l) / 60; + + if (layer.timeout != 0 && runtimeMinutes > layer.timeout) { + killFrame(frame.getFrameId(), hostname, KillCause.FrameTimedOut); + } else if (layer.timeout_llu != 0 && frame.getLluTime() != 0) { + long r = System.currentTimeMillis() / 1000; + long lastUpdate = (r - frame.getLluTime()) / 60; + + if (layer.timeout_llu != 0 && lastUpdate > (layer.timeout_llu - 1)) { + killFrame(frame.getFrameId(), hostname, KillCause.FrameLluTimedOut); + } + } + } catch (EmptyResultDataAccessException e) { + logger.info("Unable to get layer with id=" + layerId); + } } - } else { - jobs.put(job, frame.getMaxRss()); - } } - for (Map.Entry set : jobs.entrySet()) { - jobDao.updateMaxRSS(set.getKey(), set.getValue()); + /** + * Update memory usage and LLU time for the given list of frames. + * + * @param rFrames + */ + private void updateMemoryUsageAndLluTime(List rFrames) { + for (RunningFrameInfo rf : rFrames) { + FrameInterface frame = jobManager.getFrame(rf.getFrameId()); + + dispatchSupport.updateFrameMemoryUsageAndLluTime(frame, rf.getRss(), rf.getMaxRss(), + rf.getLluTime()); + + dispatchSupport.updateProcMemoryUsage(frame, rf.getRss(), rf.getMaxRss(), rf.getVsize(), + rf.getMaxVsize(), rf.getUsedGpuMemory(), rf.getMaxUsedGpuMemory(), + rf.getUsedSwapMemory(), rf.getChildren().toByteArray()); + } + + updateJobMemoryUsage(rFrames); + updateLayerMemoryUsage(rFrames); } - } - - /** - * Update layer memory usage for the given list of frames. - * - * @param frames - */ - private void updateLayerMemoryUsage(List frames) { - final Map layers = new HashMap(frames.size()); - - for (RunningFrameInfo frame : frames) { - LayerEntity layer = new LayerEntity(frame.getLayerId()); - if (layers.containsKey(layer)) { - if (layers.get(layer) < frame.getMaxRss()) { - layers.put(layer, frame.getMaxRss()); + + /** + * Update job memory using for the given list of frames. + * + * @param frames + */ + private void updateJobMemoryUsage(List frames) { + final Map jobs = new HashMap(frames.size()); + + for (RunningFrameInfo frame : frames) { + JobEntity job = new JobEntity(frame.getJobId()); + if (jobs.containsKey(job)) { + if (jobs.get(job) < frame.getMaxRss()) { + jobs.put(job, frame.getMaxRss()); + } + } else { + jobs.put(job, frame.getMaxRss()); + } + } + + for (Map.Entry set : jobs.entrySet()) { + jobDao.updateMaxRSS(set.getKey(), set.getValue()); } - } else { - layers.put(layer, frame.getMaxRss()); - } } - /* Attempt to update the max RSS value for the job **/ - for (Map.Entry set : layers.entrySet()) { - layerDao.increaseLayerMinMemory(set.getKey(), set.getValue()); - layerDao.updateLayerMaxRSS(set.getKey(), set.getValue(), false); + /** + * Update layer memory usage for the given list of frames. 
+ * + * @param frames + */ + private void updateLayerMemoryUsage(List frames) { + final Map layers = new HashMap(frames.size()); + + for (RunningFrameInfo frame : frames) { + LayerEntity layer = new LayerEntity(frame.getLayerId()); + if (layers.containsKey(layer)) { + if (layers.get(layer) < frame.getMaxRss()) { + layers.put(layer, frame.getMaxRss()); + } + } else { + layers.put(layer, frame.getMaxRss()); + } + } + + /* Attempt to update the max RSS value for the job **/ + for (Map.Entry set : layers.entrySet()) { + layerDao.increaseLayerMinMemory(set.getKey(), set.getValue()); + layerDao.updateLayerMaxRSS(set.getKey(), set.getValue(), false); + } } - } - - /** - * Number of seconds before running frames have to exist before being verified against the DB. - */ - private static final long FRAME_VERIFICATION_GRACE_PERIOD_SECONDS = 120; - - /** - * Verify all running frames in the given report against the DB. Frames that have not been running - * for at least FRAME_VERIFICATION_GRACE_PERIOD_SECONDS are skipped. - * - * If a frame->proc mapping is not verified then the record for the proc is pulled from the DB. If - * the proc doesn't exist at all, then the frame is killed with the message: "but the DB did not - * reflect this" - * - * The main reason why a proc no longer exists is that the cue though the host went down and - * cleared out all running frames. - * - * @param report - */ - public List verifyRunningFrameInfo(HostReport report) { - List runningFrames = new ArrayList(report.getFramesCount()); - - for (RunningFrameInfo runningFrame : report.getFramesList()) { - long runtimeSeconds = (System.currentTimeMillis() - runningFrame.getStartTime()) / 1000l; - - // Don't test frames that haven't been running long enough. - if (runtimeSeconds < FRAME_VERIFICATION_GRACE_PERIOD_SECONDS) { - logger.info( - "verified " + runningFrame.getJobName() + "/" + runningFrame.getFrameName() + " on " - + report.getHost().getName() + " by grace period " + runtimeSeconds + " seconds."); - runningFrames.add(runningFrame); - continue; - } - - if (hostManager.verifyRunningProc(runningFrame.getResourceId(), runningFrame.getFrameId())) { - runningFrames.add(runningFrame); - continue; - } - - /* - * The frame this proc is running is no longer assigned to this proc. Don't ever touch the - * frame record. If we make it here that means the proc has been running for over 2 min. - */ - String msg; - VirtualProc proc = null; - - try { - proc = hostManager.getVirtualProc(runningFrame.getResourceId()); - msg = "Virtual proc " + proc.getProcId() + "is assigned to " + proc.getFrameId() + " not " - + runningFrame.getFrameId(); - } catch (Exception e) { - /* - * This will happen if the host goes offline and then comes back. In this case, we don't - * touch the frame since it might already be running somewhere else. We do however kill the - * proc. 
- */ - msg = "Virtual proc did not exist."; - } - - DispatchSupport.accountingErrors.incrementAndGet(); - if (proc != null && hostManager.isOprhan(proc)) { - dispatchSupport.clearVirtualProcAssignement(proc); - dispatchSupport.unbookProc(proc); - proc = null; - } - if (proc == null) { - // A frameCompleteReport might have been delivered before this report was - // processed - FrameDetail frameLatestVersion = jobManager.getFrameDetail(runningFrame.getFrameId()); - if (frameLatestVersion.state != FrameState.RUNNING) { - logger.info("DelayedVerification, the proc " + runningFrame.getResourceId() + " on host " - + report.getHost().getName() + " has already Completed " + runningFrame.getJobName() - + "/" + runningFrame.getFrameName()); - } else if (killFrame(runningFrame.getFrameId(), report.getHost().getName(), - KillCause.FrameVerificationFailure)) { - logger.info("FrameVerificationError, the proc " + runningFrame.getResourceId() - + " on host " + report.getHost().getName() + " was running for " - + (runtimeSeconds / 60.0f) + " minutes " + runningFrame.getJobName() + "/" - + runningFrame.getFrameName() + " but the DB did not " + "reflect this. " + msg); - } else { - logger.warn("FrameStuckWarning: frameId=" + runningFrame.getFrameId() + " render_node=" - + report.getHost().getName() + " - " + runningFrame.getJobName() + "/" - + runningFrame.getFrameName()); + + /** + * Number of seconds before running frames have to exist before being verified against the DB. + */ + private static final long FRAME_VERIFICATION_GRACE_PERIOD_SECONDS = 120; + + /** + * Verify all running frames in the given report against the DB. Frames that have not been + * running for at least FRAME_VERIFICATION_GRACE_PERIOD_SECONDS are skipped. + * + * If a frame->proc mapping is not verified then the record for the proc is pulled from the DB. + * If the proc doesn't exist at all, then the frame is killed with the message: "but the DB did + * not reflect this" + * + * The main reason why a proc no longer exists is that the cue though the host went down and + * cleared out all running frames. + * + * @param report + */ + public List verifyRunningFrameInfo(HostReport report) { + List runningFrames = + new ArrayList(report.getFramesCount()); + + for (RunningFrameInfo runningFrame : report.getFramesList()) { + long runtimeSeconds = + (System.currentTimeMillis() - runningFrame.getStartTime()) / 1000l; + + // Don't test frames that haven't been running long enough. + if (runtimeSeconds < FRAME_VERIFICATION_GRACE_PERIOD_SECONDS) { + logger.info("verified " + runningFrame.getJobName() + "/" + + runningFrame.getFrameName() + " on " + report.getHost().getName() + + " by grace period " + runtimeSeconds + " seconds."); + runningFrames.add(runningFrame); + continue; + } + + if (hostManager.verifyRunningProc(runningFrame.getResourceId(), + runningFrame.getFrameId())) { + runningFrames.add(runningFrame); + continue; + } + + /* + * The frame this proc is running is no longer assigned to this proc. Don't ever touch + * the frame record. If we make it here that means the proc has been running for over 2 + * min. + */ + String msg; + VirtualProc proc = null; + + try { + proc = hostManager.getVirtualProc(runningFrame.getResourceId()); + msg = "Virtual proc " + proc.getProcId() + "is assigned to " + proc.getFrameId() + + " not " + runningFrame.getFrameId(); + } catch (Exception e) { + /* + * This will happen if the host goes offline and then comes back. In this case, we + * don't touch the frame since it might already be running somewhere else. 
We do + * however kill the proc. + */ + msg = "Virtual proc did not exist."; + } + + DispatchSupport.accountingErrors.incrementAndGet(); + if (proc != null && hostManager.isOprhan(proc)) { + dispatchSupport.clearVirtualProcAssignement(proc); + dispatchSupport.unbookProc(proc); + proc = null; + } + if (proc == null) { + // A frameCompleteReport might have been delivered before this report was + // processed + FrameDetail frameLatestVersion = + jobManager.getFrameDetail(runningFrame.getFrameId()); + if (frameLatestVersion.state != FrameState.RUNNING) { + logger.info("DelayedVerification, the proc " + runningFrame.getResourceId() + + " on host " + report.getHost().getName() + " has already Completed " + + runningFrame.getJobName() + "/" + runningFrame.getFrameName()); + } else if (killFrame(runningFrame.getFrameId(), report.getHost().getName(), + KillCause.FrameVerificationFailure)) { + logger.info("FrameVerificationError, the proc " + runningFrame.getResourceId() + + " on host " + report.getHost().getName() + " was running for " + + (runtimeSeconds / 60.0f) + " minutes " + runningFrame.getJobName() + + "/" + runningFrame.getFrameName() + " but the DB did not " + + "reflect this. " + msg); + } else { + logger.warn("FrameStuckWarning: frameId=" + runningFrame.getFrameId() + + " render_node=" + report.getHost().getName() + " - " + + runningFrame.getJobName() + "/" + runningFrame.getFrameName()); + } + } } - } + return runningFrames; } - return runningFrames; - } - public HostManager getHostManager() { - return hostManager; - } + public HostManager getHostManager() { + return hostManager; + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } - public BookingQueue getBookingQueue() { - return bookingQueue; - } + public BookingQueue getBookingQueue() { + return bookingQueue; + } - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } - public ThreadPoolExecutor getReportQueue() { - return reportQueue; - } + public ThreadPoolExecutor getReportQueue() { + return reportQueue; + } - public void setReportQueue(ThreadPoolExecutor reportQueue) { - this.reportQueue = reportQueue; - } + public void setReportQueue(ThreadPoolExecutor reportQueue) { + this.reportQueue = reportQueue; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public Dispatcher getDispatcher() { - return dispatcher; - } + public Dispatcher getDispatcher() { + return dispatcher; + } - public void setDispatcher(Dispatcher dispatcher) { - this.dispatcher = dispatcher; - } + public void setDispatcher(Dispatcher dispatcher) { + this.dispatcher = dispatcher; + } - public RqdClient getRqdClient() { - return rqdClient; - } + public RqdClient getRqdClient() { + return rqdClient; + } - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } - public JobManager getJobManager() { - return jobManager; - } + public JobManager 
getJobManager() { + return jobManager; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } - public JobDao getJobDao() { - return jobDao; - } + public JobDao getJobDao() { + return jobDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public LayerDao getLayerDao() { - return layerDao; - } + public LayerDao getLayerDao() { + return layerDao; + } - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } - public BookingManager getBookingManager() { - return bookingManager; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } - public ThreadPoolExecutor getKillQueue() { - return killQueue; - } + public ThreadPoolExecutor getKillQueue() { + return killQueue; + } - public void setKillQueue(ThreadPoolExecutor killQueue) { - this.killQueue = killQueue; - } + public void setKillQueue(ThreadPoolExecutor killQueue) { + this.killQueue = killQueue; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java index 7875c459b..28cd659d8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java @@ -32,109 +32,111 @@ public class HostReportQueue extends ThreadPoolExecutor { - private static final Logger logger = LogManager.getLogger(HostReportQueue.class); - private QueueRejectCounter rejectCounter = new QueueRejectCounter(); - private AtomicBoolean isShutdown = new AtomicBoolean(false); - private int queueCapacity; - - private Cache hostMap = - CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.HOURS).build(); - - /** - * Wrapper around protobuf object HostReport to add reportTi - */ - private class HostReportWrapper { - private final HostReport hostReport; - private final WeakReference reportTaskRef; - public long taskTime = System.currentTimeMillis(); - - public HostReportWrapper(HostReport hostReport, DispatchHandleHostReport reportTask) { - this.hostReport = hostReport; - this.reportTaskRef = new WeakReference<>(reportTask); - } + private static final Logger logger = LogManager.getLogger(HostReportQueue.class); + private QueueRejectCounter rejectCounter = new QueueRejectCounter(); + private AtomicBoolean isShutdown = new AtomicBoolean(false); + private int queueCapacity; + + private Cache hostMap = + CacheBuilder.newBuilder().expireAfterWrite(1, TimeUnit.HOURS).build(); + + /** + * Wrapper around protobuf object HostReport to add reportTi + */ + private class HostReportWrapper { + private final HostReport hostReport; + private final WeakReference reportTaskRef; + 
public long taskTime = System.currentTimeMillis(); + + public HostReportWrapper(HostReport hostReport, DispatchHandleHostReport reportTask) { + this.hostReport = hostReport; + this.reportTaskRef = new WeakReference<>(reportTask); + } - public HostReport getHostReport() { - return hostReport; - } + public HostReport getHostReport() { + return hostReport; + } - public DispatchHandleHostReport getReportTask() { - return reportTaskRef.get(); + public DispatchHandleHostReport getReportTask() { + return reportTaskRef.get(); + } + + public long getTaskTime() { + return taskTime; + } + } - public long getTaskTime() { - return taskTime; + public HostReportQueue(int threadPoolSizeInitial, int threadPoolSizeMax, int queueSize) { + super(threadPoolSizeInitial, threadPoolSizeMax, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue(queueSize)); + this.setRejectedExecutionHandler(rejectCounter); } - } - public HostReportQueue(int threadPoolSizeInitial, int threadPoolSizeMax, int queueSize) { - super(threadPoolSizeInitial, threadPoolSizeMax, 10, TimeUnit.SECONDS, - new LinkedBlockingQueue(queueSize)); - this.setRejectedExecutionHandler(rejectCounter); - } + public void execute(DispatchHandleHostReport newReport) { + if (isShutdown.get()) { + return; + } + HostReportWrapper oldWrappedReport = hostMap.getIfPresent(newReport.getKey()); + // If hostReport exists on the cache and there's also a task waiting to be + // executed + // replace the old report by the new one, but refrain from creating another task + if (oldWrappedReport != null) { + DispatchHandleHostReport oldReport = oldWrappedReport.getReportTask(); + if (oldReport != null) { + // Replace report, but keep the reference of the existing task + hostMap.put(newReport.getKey(), + new HostReportWrapper(newReport.getHostReport(), oldReport)); + return; + } + } + hostMap.put(newReport.getKey(), + new HostReportWrapper(newReport.getHostReport(), newReport)); + super.execute(newReport); + } - public void execute(DispatchHandleHostReport newReport) { - if (isShutdown.get()) { - return; + public HostReport removePendingHostReport(String key) { + if (key != null) { + HostReportWrapper r = hostMap.getIfPresent(key); + if (r != null) { + hostMap.asMap().remove(key, r); + return r.getHostReport(); + } + } + return null; } - HostReportWrapper oldWrappedReport = hostMap.getIfPresent(newReport.getKey()); - // If hostReport exists on the cache and there's also a task waiting to be - // executed - // replace the old report by the new on, but refrain from creating another task - if (oldWrappedReport != null) { - DispatchHandleHostReport oldReport = oldWrappedReport.getReportTask(); - if (oldReport != null) { - // Replace report, but keep the reference of the existing task - hostMap.put(newReport.getKey(), - new HostReportWrapper(newReport.getHostReport(), oldReport)); - return; - } + + public long getRejectedTaskCount() { + return rejectCounter.getRejectCount(); } - hostMap.put(newReport.getKey(), new HostReportWrapper(newReport.getHostReport(), newReport)); - super.execute(newReport); - } - - public HostReport removePendingHostReport(String key) { - if (key != null) { - HostReportWrapper r = hostMap.getIfPresent(key); - if (r != null) { - hostMap.asMap().remove(key, r); - return r.getHostReport(); - } + + public int getQueueCapacity() { + return queueCapacity; } - return null; - } - - public long getRejectedTaskCount() { - return rejectCounter.getRejectCount(); - } - - public int getQueueCapacity() { - return queueCapacity; - } - - public void shutdown() { - if 
(!isShutdown.getAndSet(true)) { - logger.info( - "Shutting down report pool, currently " + this.getActiveCount() + " active threads."); - - final long startTime = System.currentTimeMillis(); - while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { - try { - logger.info( - "report pool is waiting for " + this.getQueue().size() + " more units to complete"); - if (System.currentTimeMillis() - startTime > 10000) { - throw new InterruptedException("report thread pool failed to shutdown properly"); - } - Thread.sleep(250); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; + + public void shutdown() { + if (!isShutdown.getAndSet(true)) { + logger.info("Shutting down report pool, currently " + this.getActiveCount() + + " active threads."); + + final long startTime = System.currentTimeMillis(); + while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { + try { + logger.info("report pool is waiting for " + this.getQueue().size() + + " more units to complete"); + if (System.currentTimeMillis() - startTime > 10000) { + throw new InterruptedException( + "report thread pool failed to shutdown properly"); + } + Thread.sleep(250); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + break; + } + } } - } } - } - public boolean isHealthy() { - return getQueue().remainingCapacity() > 0; - } + public boolean isHealthy() { + return getQueue().remainingCapacity() > 0; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java index 897324666..8995a9517 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/JobLookupException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class JobLookupException extends SpcueRuntimeException { - public JobLookupException() { - // TODO Auto-generated constructor stub - } - - public JobLookupException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public JobLookupException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public JobLookupException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public JobLookupException() { + // TODO Auto-generated constructor stub + } + + public JobLookupException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public JobLookupException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public JobLookupException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java index 46d0b9e30..d443cb941 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java @@ -40,372 +40,373 @@ public class LocalDispatcher extends AbstractDispatcher implements Dispatcher { - @Autowired - private Environment env; - - private static final Logger logger = LogManager.getLogger(LocalDispatcher.class); - - private BookingManager bookingManager; - private JobManager jobManager; - private HostManager hostManager; - - private static final int 
MAX_QUERY_FRAMES = 10; - private static final int MAX_DISPATCHED_FRAMES = 10; - - @Override - public List dispatchHostToAllShows(DispatchHost host) { - return new ArrayList(); - } - - @Override - public List dispatchHost(DispatchHost host) { - - List lhas = bookingManager.getLocalHostAssignment(host); - host.isLocalDispatch = true; - - ArrayList procs = new ArrayList(); - for (LocalHostAssignment lha : lhas) { - prepHost(host, lha); - switch (lha.getType()) { - case JOB_PARTITION: - procs.addAll(dispatchHost(host, jobManager.getJob(lha.getJobId()), lha)); - break; - case LAYER_PARTITION: - procs.addAll(dispatchHost(host, jobManager.getLayerDetail(lha.getLayerId()), lha)); - break; - case FRAME_PARTITION: - procs.addAll(dispatchHost(host, jobManager.getFrame(lha.getFrameId()), lha)); - break; - default: - logger.warn("Error, invalid render " + "partition type: " + lha.getType()); - } + @Autowired + private Environment env; + + private static final Logger logger = LogManager.getLogger(LocalDispatcher.class); + + private BookingManager bookingManager; + private JobManager jobManager; + private HostManager hostManager; + + private static final int MAX_QUERY_FRAMES = 10; + private static final int MAX_DISPATCHED_FRAMES = 10; + + @Override + public List dispatchHostToAllShows(DispatchHost host) { + return new ArrayList(); } - return procs; - } + @Override + public List dispatchHost(DispatchHost host) { + + List lhas = bookingManager.getLocalHostAssignment(host); + host.isLocalDispatch = true; + + ArrayList procs = new ArrayList(); + for (LocalHostAssignment lha : lhas) { + prepHost(host, lha); + switch (lha.getType()) { + case JOB_PARTITION: + procs.addAll(dispatchHost(host, jobManager.getJob(lha.getJobId()), lha)); + break; + case LAYER_PARTITION: + procs.addAll( + dispatchHost(host, jobManager.getLayerDetail(lha.getLayerId()), lha)); + break; + case FRAME_PARTITION: + procs.addAll(dispatchHost(host, jobManager.getFrame(lha.getFrameId()), lha)); + break; + default: + logger.warn("Error, invalid render " + "partition type: " + lha.getType()); + } + } - private List dispatchHost(DispatchHost host, JobInterface job, - LocalHostAssignment lha) { + return procs; + } - List procs = new ArrayList(MAX_DISPATCHED_FRAMES); + private List dispatchHost(DispatchHost host, JobInterface job, + LocalHostAssignment lha) { + + List procs = new ArrayList(MAX_DISPATCHED_FRAMES); - /* - * Grab a list of frames to dispatch. - */ - List frames = - dispatchSupport.findNextDispatchFrames(job, host, MAX_QUERY_FRAMES); - - logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " - + host.idleCores + "/" + host.idleMemory + " on job " + job.getName()); - - for (DispatchFrame frame : frames) { - - /* - * Check if we have enough memory/cores for this frame, if not move on. - */ - if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.getMinMemory(), frame.minGpus, - frame.minGpuMemory)) { - continue; - } - - /* - * Build our virtual proc. - */ - VirtualProc proc = VirtualProc.build(host, frame, lha); - - /* - * Double check the job has pending frames. - */ - if (!dispatchSupport.hasPendingFrames(job)) { - break; - } - - /* - * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns false. - * If the dispatch fails in a way that we should stop dispatching immediately (the host is - * down), a DispatcherException is thrown. 
- */ - if (dispatchHost(frame, proc)) { - - procs.add(proc); - - long memReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - long memGpuReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); /* - * This should stay here and not go into VirtualProc or else the count will be off if you - * fail to book. + * Grab a list of frames to dispatch. */ - lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, - proc.gpuMemoryReserved); - if (!lha.hasAdditionalResources(lha.getThreads() * 100, memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { - break; + List frames = + dispatchSupport.findNextDispatchFrames(job, host, MAX_QUERY_FRAMES); + + logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " + + host.idleCores + "/" + host.idleMemory + " on job " + job.getName()); + + for (DispatchFrame frame : frames) { + + /* + * Check if we have enough memory/cores for this frame, if not move on. + */ + if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.getMinMemory(), + frame.minGpus, frame.minGpuMemory)) { + continue; + } + + /* + * Build our virtual proc. + */ + VirtualProc proc = VirtualProc.build(host, frame, lha); + + /* + * Double check the job has pending frames. + */ + if (!dispatchSupport.hasPendingFrames(job)) { + break; + } + + /* + * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns + * false. If the dispatch fails in a way that we should stop dispatching immediately + * (the host is down), a DispatcherException is thrown. + */ + if (dispatchHost(frame, proc)) { + + procs.add(proc); + + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = env + .getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + /* + * This should stay here and not go into VirtualProc or else the count will be off + * if you fail to book. + */ + lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved); + if (!lha.hasAdditionalResources(lha.getThreads() * 100, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + break; + } + + if (procs.size() >= MAX_DISPATCHED_FRAMES) { + break; + } + } } - if (procs.size() >= MAX_DISPATCHED_FRAMES) { - break; + if (procs.size() == 0) { + bookingManager.removeInactiveLocalHostAssignment(lha); } - } - } - if (procs.size() == 0) { - bookingManager.removeInactiveLocalHostAssignment(lha); + return procs; } - return procs; - } - - @Override - public List dispatchHost(DispatchHost host, JobInterface job) { - /* - * Load up the local assignment. If one doesn't exist, that means the user has removed it and no - * booking action should be taken. - */ - LocalHostAssignment lha = - bookingManager.getLocalHostAssignment(host.getHostId(), job.getJobId()); - prepHost(host, lha); - - return dispatchHost(host, job, lha); - } + @Override + public List dispatchHost(DispatchHost host, JobInterface job) { + /* + * Load up the local assignment. If one doesn't exist, that means the user has removed it + * and no booking action should be taken. 
+ */ + LocalHostAssignment lha = + bookingManager.getLocalHostAssignment(host.getHostId(), job.getJobId()); + prepHost(host, lha); - private List dispatchHost(DispatchHost host, LayerInterface layer, - LocalHostAssignment lha) { + return dispatchHost(host, job, lha); + } - List procs = new ArrayList(MAX_DISPATCHED_FRAMES); - /* - * Grab a list of frames to dispatch. - */ - List frames = - dispatchSupport.findNextDispatchFrames(layer, host, MAX_QUERY_FRAMES); - - logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " - + host.idleCores + "/" + host.idleMemory + " on layer " + layer); - - for (DispatchFrame frame : frames) { - - /* - * Check if we have enough memory/cores for this frame, if not move on. - */ - if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.getMinMemory(), frame.minGpus, - frame.minGpuMemory)) { - continue; - } - - /* - * Create our virtual proc. - */ - VirtualProc proc = VirtualProc.build(host, frame, lha); - - /* - * Double check if the layer we're booking has pending frames. - */ - if (!dispatchSupport.hasPendingFrames(layer)) { - break; - } - - /* - * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns false. - * If the dispatch fails in a way that we should stop dispatching immediately (the host is - * down), a DispatcherException is thrown. - */ - if (dispatchHost(frame, proc)) { - - procs.add(proc); - - long memReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - long memGpuReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + private List dispatchHost(DispatchHost host, LayerInterface layer, + LocalHostAssignment lha) { + List procs = new ArrayList(MAX_DISPATCHED_FRAMES); /* - * This should stay here and not go into VirtualProc or else the count will be off if you - * fail to book. + * Grab a list of frames to dispatch. */ - lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, - proc.gpuMemoryReserved); - if (!lha.hasAdditionalResources(100, memReservedMin, Dispatcher.GPU_UNITS_RESERVED_MIN, - memGpuReservedMin)) { - break; + List frames = + dispatchSupport.findNextDispatchFrames(layer, host, MAX_QUERY_FRAMES); + + logger.info("Frames found: " + frames.size() + " for host " + host.getName() + " " + + host.idleCores + "/" + host.idleMemory + " on layer " + layer); + + for (DispatchFrame frame : frames) { + + /* + * Check if we have enough memory/cores for this frame, if not move on. + */ + if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.getMinMemory(), + frame.minGpus, frame.minGpuMemory)) { + continue; + } + + /* + * Create our virtual proc. + */ + VirtualProc proc = VirtualProc.build(host, frame, lha); + + /* + * Double check if the layer we're booking has pending frames. + */ + if (!dispatchSupport.hasPendingFrames(layer)) { + break; + } + + /* + * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns + * false. If the dispatch fails in a way that we should stop dispatching immediately + * (the host is down), a DispatcherException is thrown. + */ + if (dispatchHost(frame, proc)) { + + procs.add(proc); + + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = env + .getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + + /* + * This should stay here and not go into VirtualProc or else the count will be off + * if you fail to book. 
+ */ + lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, + proc.gpuMemoryReserved); + if (!lha.hasAdditionalResources(100, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + break; + } + + if (procs.size() >= MAX_DISPATCHED_FRAMES) { + break; + } + } } - if (procs.size() >= MAX_DISPATCHED_FRAMES) { - break; + if (procs.size() == 0) { + bookingManager.removeInactiveLocalHostAssignment(lha); } - } - } - if (procs.size() == 0) { - bookingManager.removeInactiveLocalHostAssignment(lha); + return procs; } - return procs; - } + @Override + public List dispatchHost(DispatchHost host, LayerInterface layer) { - @Override - public List dispatchHost(DispatchHost host, LayerInterface layer) { + /* + * Load up the local assignment. If one doesn't exist, that means the user has removed it + * and no booking action should be taken. + */ - /* - * Load up the local assignment. If one doesn't exist, that means the user has removed it and no - * booking action should be taken. - */ + LocalHostAssignment lha = + bookingManager.getLocalHostAssignment(host.getHostId(), layer.getJobId()); + prepHost(host, lha); - LocalHostAssignment lha = - bookingManager.getLocalHostAssignment(host.getHostId(), layer.getJobId()); - prepHost(host, lha); + return dispatchHost(host, layer, lha); + } - return dispatchHost(host, layer, lha); - } + private List dispatchHost(DispatchHost host, FrameInterface frame, + LocalHostAssignment lha) { - private List dispatchHost(DispatchHost host, FrameInterface frame, - LocalHostAssignment lha) { + List procs = new ArrayList(1); - List procs = new ArrayList(1); + /* + * Grab a dispatch frame record for the frame we want to dispatch. + */ + DispatchFrame dframe = jobManager.getDispatchFrame(frame.getId()); + if (!lha.hasAdditionalResources(lha.getMaxCoreUnits(), dframe.getMinMemory(), + lha.getMaxGpuUnits(), dframe.minGpuMemory)) { + return procs; + } - /* - * Grab a dispatch frame record for the frame we want to dispatch. - */ - DispatchFrame dframe = jobManager.getDispatchFrame(frame.getId()); - if (!lha.hasAdditionalResources(lha.getMaxCoreUnits(), dframe.getMinMemory(), - lha.getMaxGpuUnits(), dframe.minGpuMemory)) { - return procs; - } + VirtualProc proc = VirtualProc.build(host, dframe, lha); - VirtualProc proc = VirtualProc.build(host, dframe, lha); + /* + * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns + * false. If the dispatch fails in a way that we should stop dispatching immediately (the + * host is down), a DispatcherException is thrown. + */ + if (dispatchHost(dframe, proc)) { + procs.add(proc); + } - /* - * Dispatch the frame. If a frame is booked, dispatchHost returns true, else if returns false. - * If the dispatch fails in a way that we should stop dispatching immediately (the host is - * down), a DispatcherException is thrown. - */ - if (dispatchHost(dframe, proc)) { - procs.add(proc); + if (procs.size() == 0) { + bookingManager.removeInactiveLocalHostAssignment(lha); + } + + return procs; } - if (procs.size() == 0) { - bookingManager.removeInactiveLocalHostAssignment(lha); + public List dispatchHost(DispatchHost host, FrameInterface frame) { + /* + * Load up the local assignment. If one doesn't exist, that means the user has removed it + * and no booking action should be taken. 
+ */ + + LocalHostAssignment lha = + bookingManager.getLocalHostAssignment(host.getHostId(), frame.getJobId()); + prepHost(host, lha); + + return dispatchHost(host, frame, lha); } - return procs; - } + @Override + public void dispatchProcToJob(VirtualProc proc, JobInterface job) { - public List dispatchHost(DispatchHost host, FrameInterface frame) { - /* - * Load up the local assignment. If one doesn't exist, that means the user has removed it and no - * booking action should be taken. - */ + LocalHostAssignment lha = null; + proc.isLocalDispatch = true; - LocalHostAssignment lha = - bookingManager.getLocalHostAssignment(host.getHostId(), frame.getJobId()); - prepHost(host, lha); + try { + lha = bookingManager.getLocalHostAssignment(proc.getHostId(), job.getJobId()); + } catch (EmptyResultDataAccessException e) { + logger.warn("Unable to find local host assignment for " + proc); + dispatchSupport.unbookProc(proc); + return; + } - return dispatchHost(host, frame, lha); - } + List frames = null; + switch (lha.getType()) { + case JOB_PARTITION: + frames = dispatchSupport.findNextDispatchFrames(job, proc, MAX_QUERY_FRAMES); + if (frames.size() == 0) { + dispatchSupport.unbookProc(proc); + dispatchHost(hostManager.getDispatchHost(proc.getHostId()), job); + return; + } - @Override - public void dispatchProcToJob(VirtualProc proc, JobInterface job) { + break; - LocalHostAssignment lha = null; - proc.isLocalDispatch = true; + case LAYER_PARTITION: + frames = dispatchSupport.findNextDispatchFrames( + jobManager.getLayer(proc.getLayerId()), proc, MAX_QUERY_FRAMES); + break; - try { - lha = bookingManager.getLocalHostAssignment(proc.getHostId(), job.getJobId()); - } catch (EmptyResultDataAccessException e) { - logger.warn("Unable to find local host assignment for " + proc); - dispatchSupport.unbookProc(proc); - return; - } + case FRAME_PARTITION: - List frames = null; - switch (lha.getType()) { - case JOB_PARTITION: - frames = dispatchSupport.findNextDispatchFrames(job, proc, MAX_QUERY_FRAMES); - if (frames.size() == 0) { - dispatchSupport.unbookProc(proc); - dispatchHost(hostManager.getDispatchHost(proc.getHostId()), job); - return; - } + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(lha.getFrameId()); + frames = new ArrayList(1); - break; + if (dispatchFrame.state.equals(FrameState.WAITING)) { + frames.add(dispatchFrame); + } + break; - case LAYER_PARTITION: - frames = dispatchSupport.findNextDispatchFrames(jobManager.getLayer(proc.getLayerId()), - proc, MAX_QUERY_FRAMES); - break; + default: + throw new DispatcherException("Invalid local host assignment: " + lha.getType()); - case FRAME_PARTITION: + } - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(lha.getFrameId()); - frames = new ArrayList(1); + logger.info("Frames found: " + frames.size() + " for host " + proc + " " + + proc.coresReserved + "/" + proc.memoryReserved + " on job " + job.getName()); - if (dispatchFrame.state.equals(FrameState.WAITING)) { - frames.add(dispatchFrame); + for (DispatchFrame frame : frames) { + if (dispatchProc(frame, proc)) { + return; + } } - break; - default: - throw new DispatcherException("Invalid local host assignment: " + lha.getType()); + dispatchSupport.unbookProc(proc); + } + /** + * Copy the local host assignment into the DispatchHost + * + * @param host + * @param lha + */ + private void prepHost(DispatchHost host, LocalHostAssignment lha) { + host.isLocalDispatch = true; + host.idleCores = lha.getIdleCoreUnits(); + host.idleMemory = lha.getIdleMemory(); + host.idleGpus = 
lha.getIdleGpuUnits(); + host.idleGpuMemory = lha.getIdleGpuMemory(); + } + + @Override + public List dispatchHost(DispatchHost host, ShowInterface show) { + throw new RuntimeException("not implemented"); } - logger.info("Frames found: " + frames.size() + " for host " + proc + " " + proc.coresReserved - + "/" + proc.memoryReserved + " on job " + job.getName()); + @Override + public List dispatchHost(DispatchHost host, GroupInterface g) { + throw new RuntimeException("not implemented"); + } - for (DispatchFrame frame : frames) { - if (dispatchProc(frame, proc)) { - return; - } + public JobManager getJobManager() { + return jobManager; } - dispatchSupport.unbookProc(proc); - } - - /** - * Copy the local host assignment into the DispatchHost - * - * @param host - * @param lha - */ - private void prepHost(DispatchHost host, LocalHostAssignment lha) { - host.isLocalDispatch = true; - host.idleCores = lha.getIdleCoreUnits(); - host.idleMemory = lha.getIdleMemory(); - host.idleGpus = lha.getIdleGpuUnits(); - host.idleGpuMemory = lha.getIdleGpuMemory(); - } - - @Override - public List dispatchHost(DispatchHost host, ShowInterface show) { - throw new RuntimeException("not implemented"); - } - - @Override - public List dispatchHost(DispatchHost host, GroupInterface g) { - throw new RuntimeException("not implemented"); - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public BookingManager getBookingManager() { - return bookingManager; - } - - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public BookingManager getBookingManager() { + return bookingManager; + } + + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java index ecb7f4c88..a2df5dede 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueHealthCheck.java @@ -1,7 +1,7 @@ package com.imageworks.spcue.dispatcher; public interface QueueHealthCheck { - boolean isHealthy(); + boolean isHealthy(); - void shutdownUnhealthy(); + void shutdownUnhealthy(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java index 6b3757c01..a359271c6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java @@ -21,19 +21,19 @@ public class QueueRejectCounter implements RejectedExecutionHandler { - private AtomicLong rejectCounter = new AtomicLong(0); + private AtomicLong rejectCounter = new AtomicLong(0); - @Override - public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { - rejectCounter.getAndIncrement(); - } + @Override + 
public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) { + rejectCounter.getAndIncrement(); + } - public long getRejectCount() { - return rejectCounter.get(); - } + public long getRejectCount() { + return rejectCounter.get(); + } - public void clear() { - rejectCounter.set(0); - } + public void clear() { + rejectCounter.set(0); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java index 5a7b9b8c7..7fe75c151 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java @@ -46,372 +46,375 @@ public class RedirectManager { - private static final Logger logger = LogManager.getLogger(RedirectManager.class); - - private JobDao jobDao; - private ProcDao procDao; - private GroupDao groupDao; - private Dispatcher dispatcher; - private BookingQueue bookingQueue; - private HostManager hostManager; - private JobManagerSupport jobManagerSupport; - private DispatchSupport dispatchSupport; - private RedirectService redirectService; - private ProcSearchFactory procSearchFactory; - private Environment env; - - @Autowired - public RedirectManager(RedirectService redirectService, Environment env) { - this.env = env; - this.redirectService = redirectService; - } - - /** - * Delete all redirects that are past expiration age. - * - * @return count of redirects deleted - */ - public int deleteExpired() { - return redirectService.deleteExpired(); - } - - /** - * Remove a redirect for a specific proc. - * - * @param proc - */ - public boolean removeRedirect(ProcInterface proc) { - procDao.setRedirectTarget(proc, null); - return redirectService.remove(proc.getProcId()) != null; - } - - /** - * Return true if a redirect for a specific Proc exists. False if it does not. - * - * @param proc - * @return - */ - public boolean hasRedirect(ProcInterface proc) { - return redirectService.containsKey(proc.getProcId()); - } - - /** - * Redirects procs found by the ProcSearchCriteria to the specified group. 
- * - * @param criteria - * @param group - * @param kill - * @param source - * @return - */ - public List addRedirect(ProcSearchCriteria criteria, GroupInterface group, - boolean kill, Source source) { - - List groups = new ArrayList(1); - groups.add(group); - - ProcSearchInterface search = procSearchFactory.create(criteria); - search.sortByBookedTime(); - search.notGroups(groups); - - List procs = hostManager.findBookedVirtualProcs(search); - if (procs.size() == 0) { - return procs; + private static final Logger logger = LogManager.getLogger(RedirectManager.class); + + private JobDao jobDao; + private ProcDao procDao; + private GroupDao groupDao; + private Dispatcher dispatcher; + private BookingQueue bookingQueue; + private HostManager hostManager; + private JobManagerSupport jobManagerSupport; + private DispatchSupport dispatchSupport; + private RedirectService redirectService; + private ProcSearchFactory procSearchFactory; + private Environment env; + + @Autowired + public RedirectManager(RedirectService redirectService, Environment env) { + this.env = env; + this.redirectService = redirectService; } - for (VirtualProc proc : procs) { - logger.info("Adding redirect from " + proc + " to group " + group.getName()); - - Redirect r = new Redirect(group); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - } else { - procs.remove(proc); - } + /** + * Delete all redirects that are past expiration age. + * + * @return count of redirects deleted + */ + public int deleteExpired() { + return redirectService.deleteExpired(); } - if (kill) { - jobManagerSupport.kill(procs, source); + /** + * Remove a redirect for a specific proc. + * + * @param proc + */ + public boolean removeRedirect(ProcInterface proc) { + procDao.setRedirectTarget(proc, null); + return redirectService.remove(proc.getProcId()) != null; } - return procs; - } - - /** - * Redirects procs found by the proc search criteria to an array of jobs. - * - * @param criteria - * @param jobs - * @param kill - * @param source - * @return - */ - public List addRedirect(ProcSearchCriteria criteria, List jobs, - boolean kill, Source source) { - int index = 0; - - ProcSearchInterface procSearch = procSearchFactory.create(criteria); - procSearch.notJobs(jobs); - List procs = hostManager.findBookedVirtualProcs(procSearch); - if (procs.size() == 0) { - return procs; + /** + * Return true if a redirect for a specific Proc exists. False if it does not. + * + * @param proc + * @return + */ + public boolean hasRedirect(ProcInterface proc) { + return redirectService.containsKey(proc.getProcId()); } - for (VirtualProc proc : procs) { - if (index >= jobs.size()) { - index = 0; - } - - logger.info("Adding redirect from " + proc + " to job " + jobs.get(index).getName()); - - Redirect r = new Redirect(jobs.get(index)); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - index++; - } else { - procs.remove(proc); - } - } - if (kill) { - jobManagerSupport.kill(procs, source); + /** + * Redirects procs found by the ProcSearchCriteria to the specified group. 
+ * + * @param criteria + * @param group + * @param kill + * @param source + * @return + */ + public List addRedirect(ProcSearchCriteria criteria, GroupInterface group, + boolean kill, Source source) { + + List groups = new ArrayList(1); + groups.add(group); + + ProcSearchInterface search = procSearchFactory.create(criteria); + search.sortByBookedTime(); + search.notGroups(groups); + + List procs = hostManager.findBookedVirtualProcs(search); + if (procs.size() == 0) { + return procs; + } + + for (VirtualProc proc : procs) { + logger.info("Adding redirect from " + proc + " to group " + group.getName()); + + Redirect r = new Redirect(group); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + } else { + procs.remove(proc); + } + } + + if (kill) { + jobManagerSupport.kill(procs, source); + } + + return procs; } - return procs; - } - - /** - * Redirect a list of procs to the specified job. Using redirect counters, the redirect only - * happens one all procs have reported in. This gives users the ability to kill multiple frames - * and open up large amounts of memory and cores. - * - * @param procs - * @param job - * @param source - * @return true if the redirect succeeds. - */ - public boolean addRedirect(List procs, JobInterface job, Source source) { - - String redirectGroupId = SqlUtil.genKeyRandom(); - - for (VirtualProc proc : procs) { - Redirect r = new Redirect(redirectGroupId, job); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - } + /** + * Redirects procs found by the proc search criteria to an array of jobs. + * + * @param criteria + * @param jobs + * @param kill + * @param source + * @return + */ + public List addRedirect(ProcSearchCriteria criteria, List jobs, + boolean kill, Source source) { + int index = 0; + + ProcSearchInterface procSearch = procSearchFactory.create(criteria); + procSearch.notJobs(jobs); + List procs = hostManager.findBookedVirtualProcs(procSearch); + if (procs.size() == 0) { + return procs; + } + + for (VirtualProc proc : procs) { + if (index >= jobs.size()) { + index = 0; + } + + logger.info("Adding redirect from " + proc + " to job " + jobs.get(index).getName()); + + Redirect r = new Redirect(jobs.get(index)); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + index++; + } else { + procs.remove(proc); + } + } + if (kill) { + jobManagerSupport.kill(procs, source); + } + + return procs; } - for (VirtualProc proc : procs) { - jobManagerSupport.kill(proc, source); + /** + * Redirect a list of procs to the specified job. Using redirect counters, the redirect only + * happens one all procs have reported in. This gives users the ability to kill multiple frames + * and open up large amounts of memory and cores. + * + * @param procs + * @param job + * @param source + * @return true if the redirect succeeds. + */ + public boolean addRedirect(List procs, JobInterface job, Source source) { + + String redirectGroupId = SqlUtil.genKeyRandom(); + + for (VirtualProc proc : procs) { + Redirect r = new Redirect(redirectGroupId, job); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + } + } + + for (VirtualProc proc : procs) { + jobManagerSupport.kill(proc, source); + } + + return true; } - return true; - } - - /** - * Redirect a proc to the specified job. - * - * @param proc - * @param job - * @param kill - * @param source - * @return true if the redirect succeeds. 
- */ - public boolean addRedirect(VirtualProc proc, JobInterface job, boolean kill, Source source) { - - if (dispatchSupport.findNextDispatchFrames(job, proc, 1).size() < 1) { - return false; - } - - Redirect r = new Redirect(job); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - if (kill) { - jobManagerSupport.kill(proc, source); - } - return true; - } - - return false; - } - - /** - * Redirect a proc to the specified group. - * - * @param proc - * @param group - * @param kill - * @param source - * @return true if the redirect succeeds. - */ - public boolean addRedirect(VirtualProc proc, GroupInterface group, boolean kill, Source source) { - - // Test a dispatch - DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); - host.idleCores = proc.coresReserved; - host.idleMemory = proc.memoryReserved; - host.idleGpus = proc.gpusReserved; - host.idleGpuMemory = proc.gpuMemoryReserved; - - if (dispatchSupport.findDispatchJobs(host, group).size() < 1) { - logger.info("Failed to find a pending job in group: " + group.getName()); - return false; - } + /** + * Redirect a proc to the specified job. + * + * @param proc + * @param job + * @param kill + * @param source + * @return true if the redirect succeeds. + */ + public boolean addRedirect(VirtualProc proc, JobInterface job, boolean kill, Source source) { + + if (dispatchSupport.findNextDispatchFrames(job, proc, 1).size() < 1) { + return false; + } + + Redirect r = new Redirect(job); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + if (kill) { + jobManagerSupport.kill(proc, source); + } + return true; + } - Redirect r = new Redirect(group); - if (procDao.setRedirectTarget(proc, r)) { - redirectService.put(proc.getProcId(), r); - if (kill) { - jobManagerSupport.kill(proc, source); - } - return true; + return false; } - return false; - } - - /** - * Redirect the specified proc to its redirect destination; - * - * @param proc - * @return - */ - public boolean redirect(VirtualProc proc) { + /** + * Redirect a proc to the specified group. + * + * @param proc + * @param group + * @param kill + * @param source + * @return true if the redirect succeeds. + */ + public boolean addRedirect(VirtualProc proc, GroupInterface group, boolean kill, + Source source) { + + // Test a dispatch + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + host.idleCores = proc.coresReserved; + host.idleMemory = proc.memoryReserved; + host.idleGpus = proc.gpusReserved; + host.idleGpuMemory = proc.gpuMemoryReserved; + + if (dispatchSupport.findDispatchJobs(host, group).size() < 1) { + logger.info("Failed to find a pending job in group: " + group.getName()); + return false; + } + + Redirect r = new Redirect(group); + if (procDao.setRedirectTarget(proc, r)) { + redirectService.put(proc.getProcId(), r); + if (kill) { + jobManagerSupport.kill(proc, source); + } + return true; + } - try { - - Redirect r = redirectService.remove(proc.getProcId()); - if (r == null) { - logger.info("Failed to find redirect for proc " + proc); return false; - } - - int other_redirects_with_same_group = redirectService.countRedirectsWithGroup(r.getGroupId()); + } - if (other_redirects_with_same_group > 0) { - logger.warn("Redirect waiting on " + other_redirects_with_same_group + " more frames."); - return false; - } - - /* - * The proc must be unbooked before its resources can be redirected. 
- */ - dispatchSupport.unbookProc(proc, "is being redirected"); - - /* - * Set the free cores and memory to the exact amount on the proc we just unbooked so we don't - * stomp on other redirects. - */ - DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); - - switch (r.getType()) { - - case JOB_REDIRECT: - logger.info("attempting a job redirect to " + r.getDestinationId()); - JobInterface job = jobDao.getJob(r.getDestinationId()); - logger.info("redirecting proc " + proc + " to job " + job.getName()); - - if (dispatcher.isTestMode()) { - dispatcher.dispatchHost(host, job); - } else { - bookingQueue.execute(new DispatchBookHost(host, job, dispatcher, env)); - } - return true; - - case GROUP_REDIRECT: - logger.info("attempting a group redirect to " + r.getDestinationId()); - GroupInterface group = groupDao.getGroup(r.getDestinationId()); - logger.info("redirecting group " + proc + " to job " + group.getName()); - - if (dispatcher.isTestMode()) { - dispatcher.dispatchHost(host, group); - } else { - bookingQueue.execute(new DispatchBookHost(host, group, dispatcher, env)); - } - return true; - - default: - logger.info("redirect failed, invalid redirect type: " + r.getType()); - return false; - } - - } catch (Exception e) { - /* - * If anything fails the redirect fails, so just return false after logging. - */ - CueExceptionUtil.logStackTrace("redirect failed", e); - return false; + /** + * Redirect the specified proc to its redirect destination; + * + * @param proc + * @return + */ + public boolean redirect(VirtualProc proc) { + + try { + + Redirect r = redirectService.remove(proc.getProcId()); + if (r == null) { + logger.info("Failed to find redirect for proc " + proc); + return false; + } + + int other_redirects_with_same_group = + redirectService.countRedirectsWithGroup(r.getGroupId()); + + if (other_redirects_with_same_group > 0) { + logger.warn( + "Redirect waiting on " + other_redirects_with_same_group + " more frames."); + return false; + } + + /* + * The proc must be unbooked before its resources can be redirected. + */ + dispatchSupport.unbookProc(proc, "is being redirected"); + + /* + * Set the free cores and memory to the exact amount on the proc we just unbooked so we + * don't stomp on other redirects. + */ + DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); + + switch (r.getType()) { + + case JOB_REDIRECT: + logger.info("attempting a job redirect to " + r.getDestinationId()); + JobInterface job = jobDao.getJob(r.getDestinationId()); + logger.info("redirecting proc " + proc + " to job " + job.getName()); + + if (dispatcher.isTestMode()) { + dispatcher.dispatchHost(host, job); + } else { + bookingQueue.execute(new DispatchBookHost(host, job, dispatcher, env)); + } + return true; + + case GROUP_REDIRECT: + logger.info("attempting a group redirect to " + r.getDestinationId()); + GroupInterface group = groupDao.getGroup(r.getDestinationId()); + logger.info("redirecting group " + proc + " to job " + group.getName()); + + if (dispatcher.isTestMode()) { + dispatcher.dispatchHost(host, group); + } else { + bookingQueue.execute(new DispatchBookHost(host, group, dispatcher, env)); + } + return true; + + default: + logger.info("redirect failed, invalid redirect type: " + r.getType()); + return false; + } + + } catch (Exception e) { + /* + * If anything fails the redirect fails, so just return false after logging. 
+ */ + CueExceptionUtil.logStackTrace("redirect failed", e); + return false; + } } - } - public JobDao getJobDao() { - return jobDao; - } + public JobDao getJobDao() { + return jobDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public GroupDao getGroupDao() { - return groupDao; - } + public GroupDao getGroupDao() { + return groupDao; + } - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } - public Dispatcher getDispatcher() { - return dispatcher; - } + public Dispatcher getDispatcher() { + return dispatcher; + } - public void setDispatcher(Dispatcher dispatcher) { - this.dispatcher = dispatcher; - } + public void setDispatcher(Dispatcher dispatcher) { + this.dispatcher = dispatcher; + } - public BookingQueue getBookingQueue() { - return bookingQueue; - } + public BookingQueue getBookingQueue() { + return bookingQueue; + } - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } - public HostManager getHostManager() { - return hostManager; - } + public HostManager getHostManager() { + return hostManager; + } - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public ProcDao getProcDao() { - return procDao; - } + public ProcDao getProcDao() { + return procDao; + } - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } + public ProcSearchFactory getProcSearchFactory() { + return procSearchFactory; + } - public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; - } + public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { + this.procSearchFactory = procSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java index 879dad3fb..6910315ea 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java @@ -17,26 +17,26 @@ public interface ResourceContainer { - /** - * Return true if the container can handle the given resource amounts. False if not. 
- * - * @param minCores - * @param minMemory - * @param minGpus - * @param minGpuMemory - * @return - */ - public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, - long minGpuMemory); + /** + * Return true if the container can handle the given resource amounts. False if not. + * + * @param minCores + * @param minMemory + * @param minGpus + * @param minGpuMemory + * @return + */ + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, + long minGpuMemory); - /** - * Subtract the given resources from the grand totals. - * - * @param coreUnits - * @param memory - * @param gpuUnits - * @param gpuMemory - */ - public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory); + /** + * Subtract the given resources from the grand totals. + * + * @param coreUnits + * @param memory + * @param gpuUnits + * @param gpuMemory + */ + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java index b83f3f1df..18d5859c7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceCreationFailureException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class ResourceCreationFailureException extends SpcueRuntimeException { - public ResourceCreationFailureException() { - // TODO Auto-generated constructor stub - } - - public ResourceCreationFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public ResourceCreationFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public ResourceCreationFailureException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public ResourceCreationFailureException() { + // TODO Auto-generated constructor stub + } + + public ResourceCreationFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public ResourceCreationFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public ResourceCreationFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java index 4b502f021..8faf06b20 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceDuplicationFailureException.java @@ -23,23 +23,23 @@ @SuppressWarnings("serial") public class ResourceDuplicationFailureException extends SpcueRuntimeException { - public ResourceDuplicationFailureException() { - // TODO Auto-generated constructor stub - } - - public ResourceDuplicationFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public ResourceDuplicationFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public ResourceDuplicationFailureException(Throwable cause) { - super(cause); - 
// TODO Auto-generated constructor stub - } + public ResourceDuplicationFailureException() { + // TODO Auto-generated constructor stub + } + + public ResourceDuplicationFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public ResourceDuplicationFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public ResourceDuplicationFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java index aa2cb5212..a025d65ae 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReleaseFailureException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class ResourceReleaseFailureException extends SpcueRuntimeException { - public ResourceReleaseFailureException() { - // TODO Auto-generated constructor stub - } - - public ResourceReleaseFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public ResourceReleaseFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public ResourceReleaseFailureException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public ResourceReleaseFailureException() { + // TODO Auto-generated constructor stub + } + + public ResourceReleaseFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public ResourceReleaseFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public ResourceReleaseFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java index 37f0fdf21..c0404b6a4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceReservationFailureException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class ResourceReservationFailureException extends SpcueRuntimeException { - public ResourceReservationFailureException() { - // TODO Auto-generated constructor stub - } - - public ResourceReservationFailureException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public ResourceReservationFailureException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public ResourceReservationFailureException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public ResourceReservationFailureException() { + // TODO Auto-generated constructor stub + } + + public ResourceReservationFailureException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public ResourceReservationFailureException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public 
ResourceReservationFailureException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java index d8e3992bb..35a0b2ae4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RqdRetryReportException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class RqdRetryReportException extends SpcueRuntimeException { - public RqdRetryReportException() { - // TODO Auto-generated constructor stub - } - - public RqdRetryReportException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public RqdRetryReportException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public RqdRetryReportException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public RqdRetryReportException() { + // TODO Auto-generated constructor stub + } + + public RqdRetryReportException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public RqdRetryReportException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public RqdRetryReportException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java index 58cf4c568..c7a6802b2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java @@ -31,92 +31,93 @@ */ public class DispatchBookHost extends KeyRunnable { - private Environment env; - private ShowInterface show = null; - private GroupInterface group = null; - private JobInterface job = null; - private DispatchHost host; - private Dispatcher dispatcher; - - public DispatchHost getDispatchHost() { - this.setKey(host.getId()); - return host; - } - - public DispatchBookHost(DispatchHost host, Dispatcher d, Environment env) { - super(host.getId()); - this.host = host; - this.dispatcher = d; - this.env = env; - } - - public DispatchBookHost(DispatchHost host, JobInterface job, Dispatcher d, Environment env) { - super(host.getId() + "_job_" + job.getJobId()); - this.host = host; - this.job = job; - this.dispatcher = d; - this.env = env; - } - - public DispatchBookHost(DispatchHost host, GroupInterface group, Dispatcher d, Environment env) { - super(host.getId() + "_group_" + group.getGroupId()); - this.host = host; - this.group = group; - this.dispatcher = d; - this.env = env; - } - - public DispatchBookHost(DispatchHost host, ShowInterface show, Dispatcher d, Environment env) { - super(host.getId() + "_name_" + show.getName()); - this.host = host; - this.show = show; - this.dispatcher = d; - this.env = env; - } - - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (show != null) { - dispatcher.dispatchHost(host, show); - } else if (group != null) { - dispatcher.dispatchHost(host, group); - } else if (job != null) { - dispatcher.dispatchHost(host, job); - } - long memReservedMin = - 
env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - long memGpuReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); - - // Try to book any remaining resources - if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { - dispatcher.dispatchHost(host); - } + private Environment env; + private ShowInterface show = null; + private GroupInterface group = null; + private JobInterface job = null; + private DispatchHost host; + private Dispatcher dispatcher; + + public DispatchHost getDispatchHost() { + this.setKey(host.getId()); + return host; + } - if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, memReservedMin, - Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { - dispatcher.dispatchHostToAllShows(host); - } - } - }.execute(); - } - - @Override - public int hashCode() { - return host.name.hashCode(); - }; - - @Override - public boolean equals(Object other) { - if (other == null) { - return false; + public DispatchBookHost(DispatchHost host, Dispatcher d, Environment env) { + super(host.getId()); + this.host = host; + this.dispatcher = d; + this.env = env; } - if (this.getClass() != other.getClass()) { - return false; + + public DispatchBookHost(DispatchHost host, JobInterface job, Dispatcher d, Environment env) { + super(host.getId() + "_job_" + job.getJobId()); + this.host = host; + this.job = job; + this.dispatcher = d; + this.env = env; + } + + public DispatchBookHost(DispatchHost host, GroupInterface group, Dispatcher d, + Environment env) { + super(host.getId() + "_group_" + group.getGroupId()); + this.host = host; + this.group = group; + this.dispatcher = d; + this.env = env; } - DispatchBookHost that = (DispatchBookHost) other; - return that.host.name.equals(host.name); - }; + + public DispatchBookHost(DispatchHost host, ShowInterface show, Dispatcher d, Environment env) { + super(host.getId() + "_name_" + show.getName()); + this.host = host; + this.show = show; + this.dispatcher = d; + this.env = env; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (show != null) { + dispatcher.dispatchHost(host, show); + } else if (group != null) { + dispatcher.dispatchHost(host, group); + } else if (job != null) { + dispatcher.dispatchHost(host, job); + } + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memGpuReservedMin = env + .getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + + // Try to book any remaining resources + if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + dispatcher.dispatchHost(host); + } + + if (host.hasAdditionalResources(Dispatcher.CORE_POINTS_RESERVED_MIN, memReservedMin, + Dispatcher.GPU_UNITS_RESERVED_MIN, memGpuReservedMin)) { + dispatcher.dispatchHostToAllShows(host); + } + } + }.execute(); + } + + @Override + public int hashCode() { + return host.name.hashCode(); + }; + + @Override + public boolean equals(Object other) { + if (other == null) { + return false; + } + if (this.getClass() != other.getClass()) { + return false; + } + DispatchBookHost that = (DispatchBookHost) other; + return that.host.name.equals(host.name); + }; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java index 00ec6979c..5b2d3d0ae 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java @@ -20,22 +20,22 @@ public class DispatchBookHostLocal extends KeyRunnable { - private DispatchHost host; - private Dispatcher dispatcher; - - public DispatchBookHostLocal(DispatchHost host, Dispatcher d) { - super(host.getId()); - - this.host = host; - this.dispatcher = d; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - dispatcher.dispatchHost(host); - } - }.execute(); - } + private DispatchHost host; + private Dispatcher dispatcher; + + public DispatchBookHostLocal(DispatchHost host, Dispatcher d) { + super(host.getId()); + + this.host = host; + this.dispatcher = d; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + dispatcher.dispatchHost(host); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java index b0406348e..1a78c1a17 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchCommandTemplate.java @@ -24,14 +24,15 @@ */ public abstract class DispatchCommandTemplate { - public abstract void wrapDispatchCommand(); + public abstract void wrapDispatchCommand(); - public void execute() { - try { - wrapDispatchCommand(); - } catch (java.lang.Throwable t) { - CueExceptionUtil.logStackTrace( - "Dispatch command template " + this.getClass().toString() + " caught error ", t); + public void execute() { + try { + wrapDispatchCommand(); + } catch (java.lang.Throwable t) { + CueExceptionUtil.logStackTrace( + "Dispatch command template " + this.getClass().toString() + " caught error ", + t); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java index 3e9b698b4..e735803aa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java @@ -29,54 +29,57 @@ */ public class DispatchDropDepends extends KeyRunnable { - JobInterface job; - LayerInterface layer; - FrameInterface frame; + JobInterface job; + LayerInterface layer; + FrameInterface frame; - DependTarget target; - DependManager dependManager; + DependTarget target; + DependManager dependManager; - public DispatchDropDepends(JobInterface job, DependTarget target, DependManager dependManager) { - super("disp_drop_dep_job_" + job.getJobId() + "_" + target.toString()); - this.job = job; - this.target = target; - this.dependManager = dependManager; - } + public DispatchDropDepends(JobInterface job, DependTarget target, DependManager dependManager) { + super("disp_drop_dep_job_" + job.getJobId() + "_" + target.toString()); + this.job = job; + this.target = target; + this.dependManager = dependManager; + } - public DispatchDropDepends(LayerInterface layer, DependTarget target, - DependManager dependManager) { - super("disp_drop_dep_layer_" + 
layer.getLayerId() + "_" + target.toString()); - this.layer = layer; - this.target = target; - this.dependManager = dependManager; - } + public DispatchDropDepends(LayerInterface layer, DependTarget target, + DependManager dependManager) { + super("disp_drop_dep_layer_" + layer.getLayerId() + "_" + target.toString()); + this.layer = layer; + this.target = target; + this.dependManager = dependManager; + } - public DispatchDropDepends(FrameInterface frame, DependTarget target, - DependManager dependManager) { - super("disp_drop_dep_frame_" + frame.getFrameId() + "_" + target.toString()); - this.frame = frame; - this.target = target; - this.dependManager = dependManager; - } + public DispatchDropDepends(FrameInterface frame, DependTarget target, + DependManager dependManager) { + super("disp_drop_dep_frame_" + frame.getFrameId() + "_" + target.toString()); + this.frame = frame; + this.target = target; + this.dependManager = dependManager; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (job != null) { - for (LightweightDependency d : dependManager.getWhatThisDependsOn(job, target)) { - dependManager.satisfyDepend(d); - } - } else if (layer != null) { - for (LightweightDependency d : dependManager.getWhatThisDependsOn(layer, target)) { - dependManager.satisfyDepend(d); - } - } else if (frame != null) { - for (LightweightDependency d : dependManager.getWhatThisDependsOn(frame, target)) { - dependManager.satisfyDepend(d); - } - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (job != null) { + for (LightweightDependency d : dependManager.getWhatThisDependsOn(job, + target)) { + dependManager.satisfyDepend(d); + } + } else if (layer != null) { + for (LightweightDependency d : dependManager.getWhatThisDependsOn(layer, + target)) { + dependManager.satisfyDepend(d); + } + } else if (frame != null) { + for (LightweightDependency d : dependManager.getWhatThisDependsOn(frame, + target)) { + dependManager.satisfyDepend(d); + } + } - } - }.execute(); - } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java index 82108912f..49573d354 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java @@ -26,23 +26,23 @@ */ public class DispatchEatFrames extends KeyRunnable { - private FrameSearchInterface search; - private Source source; - private JobManagerSupport jobManagerSupport; + private FrameSearchInterface search; + private Source source; + private JobManagerSupport jobManagerSupport; - public DispatchEatFrames(FrameSearchInterface search, Source source, - JobManagerSupport jobManagerSupport) { - super("disp_eat_frames_job_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); - this.search = search; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchEatFrames(FrameSearchInterface search, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_eat_frames_job_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); + this.search = search; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.eatFrames(search, source); - } - 
}.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.eatFrames(search, source); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java index 47c6553a6..0e57b9823 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java @@ -26,41 +26,41 @@ */ public class DispatchHandleHostReport extends KeyRunnable { - private HostReport hostReport; - private boolean isBootReport; - private HostReportHandler hostReportHandler; - public volatile int reportTime = (int) (System.currentTimeMillis() / 1000); + private HostReport hostReport; + private boolean isBootReport; + private HostReportHandler hostReportHandler; + public volatile int reportTime = (int) (System.currentTimeMillis() / 1000); - public DispatchHandleHostReport(HostReport report, HostReportHandler rqdReportManager) { - super("disp_handle_host_report_" + report.hashCode() + "_" + rqdReportManager.hashCode()); - this.hostReport = report; - this.isBootReport = false; - this.hostReportHandler = rqdReportManager; - } + public DispatchHandleHostReport(HostReport report, HostReportHandler rqdReportManager) { + super("disp_handle_host_report_" + report.hashCode() + "_" + rqdReportManager.hashCode()); + this.hostReport = report; + this.isBootReport = false; + this.hostReportHandler = rqdReportManager; + } - public DispatchHandleHostReport(BootReport report, HostReportHandler rqdReportManager) { - super("disp_handle_host_report_" + report.hashCode() + "_" + rqdReportManager.hashCode()); - HostReport hostReport = - HostReport.newBuilder().setHost(report.getHost()).setCoreInfo(report.getCoreInfo()).build(); + public DispatchHandleHostReport(BootReport report, HostReportHandler rqdReportManager) { + super("disp_handle_host_report_" + report.hashCode() + "_" + rqdReportManager.hashCode()); + HostReport hostReport = HostReport.newBuilder().setHost(report.getHost()) + .setCoreInfo(report.getCoreInfo()).build(); - this.hostReport = hostReport; - this.isBootReport = true; - this.hostReportHandler = rqdReportManager; - } + this.hostReport = hostReport; + this.isBootReport = true; + this.hostReportHandler = rqdReportManager; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - hostReportHandler.handleHostReport(hostReport, isBootReport); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + hostReportHandler.handleHostReport(hostReport, isBootReport); + } + }.execute(); + } - public synchronized void updateReportTime() { - reportTime = (int) (System.currentTimeMillis() / 1000); - } + public synchronized void updateReportTime() { + reportTime = (int) (System.currentTimeMillis() / 1000); + } - public HostReport getHostReport() { - return hostReport; - } + public HostReport getHostReport() { + return hostReport; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java index e3f7e1ffb..ea938249b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java @@ -25,26 +25,26 @@ * @category command */ public class DispatchJobComplete extends KeyRunnable { - private JobInterface job; - private Source source; - private boolean isManualKill; + private JobInterface job; + private Source source; + private boolean isManualKill; - private JobManagerSupport jobManagerSupport; + private JobManagerSupport jobManagerSupport; - public DispatchJobComplete(JobInterface job, Source source, boolean isManualKill, - JobManagerSupport jobManagerSupport) { - super("disp_job_complete_" + job.getJobId() + "_" + source.toString()); - this.job = job; - this.source = source; - this.isManualKill = isManualKill; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchJobComplete(JobInterface job, Source source, boolean isManualKill, + JobManagerSupport jobManagerSupport) { + super("disp_job_complete_" + job.getJobId() + "_" + source.toString()); + this.job = job; + this.source = source; + this.isManualKill = isManualKill; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.shutdownJob(job, source, isManualKill); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.shutdownJob(job, source, isManualKill); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java index 2c2e97336..9ac87661e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java @@ -26,23 +26,23 @@ */ public class DispatchKillFrames extends KeyRunnable { - private FrameSearchInterface search; - private JobManagerSupport jobManagerSupport; - private Source source; + private FrameSearchInterface search; + private JobManagerSupport jobManagerSupport; + private Source source; - public DispatchKillFrames(FrameSearchInterface search, Source source, - JobManagerSupport jobManagerSupport) { - super("disp_kill_frames_" + source.toString() + "_" + jobManagerSupport.hashCode()); - this.search = search; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchKillFrames(FrameSearchInterface search, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_kill_frames_" + source.toString() + "_" + jobManagerSupport.hashCode()); + this.search = search; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.killProcs(search, source, true); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.killProcs(search, source, true); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java index 6176bc200..3505aa54e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java @@ -21,26 +21,26 @@ import java.util.Collection; 
public class DispatchKillProcs extends KeyRunnable { - private Collection procs; - private JobManagerSupport jobManagerSupport; - private Source source; + private Collection procs; + private JobManagerSupport jobManagerSupport; + private Source source; - public DispatchKillProcs(Collection procs, Source source, - JobManagerSupport jobManagerSupport) { - super("disp_kill_procs_" + procs.hashCode() + "_" + source.toString() + "_" - + jobManagerSupport.hashCode()); - this.procs = procs; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchKillProcs(Collection procs, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_kill_procs_" + procs.hashCode() + "_" + source.toString() + "_" + + jobManagerSupport.hashCode()); + this.procs = procs; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - for (VirtualProc p : procs) { - jobManagerSupport.kill(p, source); - } - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + for (VirtualProc p : procs) { + jobManagerSupport.kill(p, source); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java index a88ce9b72..7b842961f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java @@ -23,20 +23,20 @@ */ public class DispatchLaunchJob extends KeyRunnable { - private JobLauncher jobLauncher; - private JobSpec spec; + private JobLauncher jobLauncher; + private JobSpec spec; - public DispatchLaunchJob(JobSpec spec, JobLauncher jobLauncher) { - super("disp_launch_job_" + spec.getShow() + "_" + spec.getShot() + "_" + spec.getUid()); - this.spec = spec; - this.jobLauncher = jobLauncher; - } + public DispatchLaunchJob(JobSpec spec, JobLauncher jobLauncher) { + super("disp_launch_job_" + spec.getShow() + "_" + spec.getShot() + "_" + spec.getUid()); + this.spec = spec; + this.jobLauncher = jobLauncher; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobLauncher.launch(spec); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobLauncher.launch(spec); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java index 262338b32..9ed50c8be 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java @@ -24,26 +24,26 @@ public class DispatchMoveJobs extends KeyRunnable { - private GroupDetail group; - private List jobs; - private GroupManager groupManager; - - public DispatchMoveJobs(GroupDetail group, List jobs, GroupManager groupManager) { - super("disp_move_jobs_" + group.getGroupId() + "_dept_" + group.getDepartmentId() + "_show_" - + group.getShowId()); - this.group = group; - this.jobs = jobs; - this.groupManager = groupManager; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - for (JobInterface job : 
jobs) { - groupManager.reparentJob(job, group, new Inherit[] {Inherit.All}); - } - } - }.execute(); - } + private GroupDetail group; + private List jobs; + private GroupManager groupManager; + + public DispatchMoveJobs(GroupDetail group, List jobs, GroupManager groupManager) { + super("disp_move_jobs_" + group.getGroupId() + "_dept_" + group.getDepartmentId() + "_show_" + + group.getShowId()); + this.group = group; + this.jobs = jobs; + this.groupManager = groupManager; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + for (JobInterface job : jobs) { + groupManager.reparentJob(job, group, new Inherit[] {Inherit.All}); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java index c7920aba0..8b49b1661 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java @@ -26,22 +26,22 @@ */ public class DispatchNextFrame extends KeyRunnable { - private VirtualProc proc; - private DispatchJob job; - private Dispatcher dispatcher; + private VirtualProc proc; + private DispatchJob job; + private Dispatcher dispatcher; - public DispatchNextFrame(DispatchJob j, VirtualProc p, Dispatcher d) { - super("disp_next_frame_" + j.getJobId() + "_" + p.getProcId()); - this.job = j; - this.proc = p; - this.dispatcher = d; - } + public DispatchNextFrame(DispatchJob j, VirtualProc p, Dispatcher d) { + super("disp_next_frame_" + j.getJobId() + "_" + p.getProcId()); + this.job = j; + this.proc = p; + this.dispatcher = d; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - dispatcher.dispatchProcToJob(proc, job); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + dispatcher.dispatchProcToJob(proc, job); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java index 26cf95f9c..a643c28e9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java @@ -23,40 +23,41 @@ public class DispatchReorderFrames extends KeyRunnable { - private JobInterface job = null; - private LayerInterface layer = null; - private FrameSet frameSet; - private Order order; - private JobManagerSupport jobManagerSupport; - - public DispatchReorderFrames(JobInterface job, FrameSet frameSet, Order order, - JobManagerSupport jobManagerSupport) { - super("disp_reorder_frames_job_" + job.getJobId() + "_" + jobManagerSupport.toString()); - this.job = job; - this.frameSet = frameSet; - this.order = order; - this.jobManagerSupport = jobManagerSupport; - } - - public DispatchReorderFrames(LayerInterface layer, FrameSet frameSet, Order order, - JobManagerSupport jobManagerSupport) { - super("disp_reorder_frames_layer_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); - this.layer = layer; - this.frameSet = frameSet; - this.order = order; - this.jobManagerSupport = jobManagerSupport; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if 
(job != null) { - jobManagerSupport.reorderJob(job, frameSet, order); - } else if (layer != null) { - jobManagerSupport.reorderLayer(layer, frameSet, order); - } - } - }.execute(); - } + private JobInterface job = null; + private LayerInterface layer = null; + private FrameSet frameSet; + private Order order; + private JobManagerSupport jobManagerSupport; + + public DispatchReorderFrames(JobInterface job, FrameSet frameSet, Order order, + JobManagerSupport jobManagerSupport) { + super("disp_reorder_frames_job_" + job.getJobId() + "_" + jobManagerSupport.toString()); + this.job = job; + this.frameSet = frameSet; + this.order = order; + this.jobManagerSupport = jobManagerSupport; + } + + public DispatchReorderFrames(LayerInterface layer, FrameSet frameSet, Order order, + JobManagerSupport jobManagerSupport) { + super("disp_reorder_frames_layer_" + layer.getLayerId() + "_" + + jobManagerSupport.toString()); + this.layer = layer; + this.frameSet = frameSet; + this.order = order; + this.jobManagerSupport = jobManagerSupport; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (job != null) { + jobManagerSupport.reorderJob(job, frameSet, order); + } else if (layer != null) { + jobManagerSupport.reorderLayer(layer, frameSet, order); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java index 0de3a787e..2941c337f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java @@ -26,23 +26,23 @@ */ public class DispatchRetryFrames extends KeyRunnable { - private FrameSearchInterface search; - private Source source; - private JobManagerSupport jobManagerSupport; + private FrameSearchInterface search; + private Source source; + private JobManagerSupport jobManagerSupport; - public DispatchRetryFrames(FrameSearchInterface search, Source source, - JobManagerSupport jobManagerSupport) { - super("disp_retry_frames_" + search.hashCode() + "_" + source.toString()); - this.search = search; - this.source = source; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchRetryFrames(FrameSearchInterface search, Source source, + JobManagerSupport jobManagerSupport) { + super("disp_retry_frames_" + search.hashCode() + "_" + source.toString()); + this.search = search; + this.source = source; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - jobManagerSupport.retryFrames(search, source); - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + jobManagerSupport.retryFrames(search, source); + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java index 11fba3e9b..f895640b5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java @@ -25,33 +25,33 @@ public class DispatchRqdKillFrame extends KeyRunnable { - private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrame.class); - - private 
String message; - private String hostname; - private String frameId; - - private final RqdClient rqdClient; - - public DispatchRqdKillFrame(String hostname, String frameId, String message, - RqdClient rqdClient) { - super("disp_rqd_kill_frame_" + hostname + "_" + frameId + "_" + rqdClient.toString()); - this.hostname = hostname; - this.frameId = frameId; - this.message = message; - this.rqdClient = rqdClient; - } - - @Override - public void run() { - long startTime = System.currentTimeMillis(); - try { - rqdClient.killFrame(hostname, frameId, message); - } catch (RqdClientException e) { - logger.info("Failed to contact host " + hostname + ", " + e); - } finally { - long elapsedTime = System.currentTimeMillis() - startTime; - logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); + private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrame.class); + + private String message; + private String hostname; + private String frameId; + + private final RqdClient rqdClient; + + public DispatchRqdKillFrame(String hostname, String frameId, String message, + RqdClient rqdClient) { + super("disp_rqd_kill_frame_" + hostname + "_" + frameId + "_" + rqdClient.toString()); + this.hostname = hostname; + this.frameId = frameId; + this.message = message; + this.rqdClient = rqdClient; + } + + @Override + public void run() { + long startTime = System.currentTimeMillis(); + try { + rqdClient.killFrame(hostname, frameId, message); + } catch (RqdClientException e) { + logger.info("Failed to contact host " + hostname + ", " + e); + } finally { + long elapsedTime = System.currentTimeMillis() - startTime; + logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java index f070692cb..736e4e648 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java @@ -32,42 +32,42 @@ */ public class DispatchRqdKillFrameMemory extends KeyRunnable { - private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrameMemory.class); + private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrameMemory.class); - private String message; - private String hostname; - private DispatchSupport dispatchSupport; - private final RqdClient rqdClient; - private final boolean isTestMode; + private String message; + private String hostname; + private DispatchSupport dispatchSupport; + private final RqdClient rqdClient; + private final boolean isTestMode; - private FrameInterface frame; + private FrameInterface frame; - public DispatchRqdKillFrameMemory(String hostname, FrameInterface frame, String message, - RqdClient rqdClient, DispatchSupport dispatchSupport, boolean isTestMode) { - super("disp_rqd_kill_frame_" + frame.getFrameId() + "_" + rqdClient.toString()); - this.frame = frame; - this.hostname = hostname; - this.message = message; - this.rqdClient = rqdClient; - this.dispatchSupport = dispatchSupport; - this.isTestMode = isTestMode; - } + public DispatchRqdKillFrameMemory(String hostname, FrameInterface frame, String message, + RqdClient rqdClient, DispatchSupport dispatchSupport, boolean isTestMode) { + super("disp_rqd_kill_frame_" + frame.getFrameId() + "_" + rqdClient.toString()); + 
this.frame = frame; + this.hostname = hostname; + this.message = message; + this.rqdClient = rqdClient; + this.dispatchSupport = dispatchSupport; + this.isTestMode = isTestMode; + } - @Override - public void run() { - long startTime = System.currentTimeMillis(); - try { - if (dispatchSupport.updateFrameMemoryError(frame) && !isTestMode) { - rqdClient.killFrame(hostname, frame.getFrameId(), message); - } else { - logger.warn("Could not update frame " + frame.getFrameId() - + " status to EXIT_STATUS_MEMORY_FAILURE. Canceling kill request!"); - } - } catch (RqdClientException e) { - logger.warn("Failed to contact host " + hostname + ", " + e); - } finally { - long elapsedTime = System.currentTimeMillis() - startTime; - logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); + @Override + public void run() { + long startTime = System.currentTimeMillis(); + try { + if (dispatchSupport.updateFrameMemoryError(frame) && !isTestMode) { + rqdClient.killFrame(hostname, frame.getFrameId(), message); + } else { + logger.warn("Could not update frame " + frame.getFrameId() + + " status to EXIT_STATUS_MEMORY_FAILURE. Canceling kill request!"); + } + } catch (RqdClientException e) { + logger.warn("Failed to contact host " + hostname + ", " + e); + } finally { + long elapsedTime = System.currentTimeMillis() - startTime; + logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java index 39bae5229..1f8a2aa8b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java @@ -28,49 +28,50 @@ */ public class DispatchSatisfyDepends extends KeyRunnable { - private JobInterface job = null; - private LayerInterface layer = null; - private FrameInterface frame = null; - private FrameSearchInterface search; - private JobManagerSupport jobManagerSupport; + private JobInterface job = null; + private LayerInterface layer = null; + private FrameInterface frame = null; + private FrameSearchInterface search; + private JobManagerSupport jobManagerSupport; - public DispatchSatisfyDepends(JobInterface job, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + job.getJobId() + "_" + jobManagerSupport.toString()); - this.job = job; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(JobInterface job, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + job.getJobId() + "_" + jobManagerSupport.toString()); + this.job = job; + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSatisfyDepends(LayerInterface layer, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); - this.layer = layer; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(LayerInterface layer, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); + this.layer = layer; + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSatisfyDepends(FrameInterface frame, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + frame.getFrameId() + "_" + jobManagerSupport.toString()); - this.frame = frame; - this.jobManagerSupport = 
jobManagerSupport; - } + public DispatchSatisfyDepends(FrameInterface frame, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + frame.getFrameId() + "_" + jobManagerSupport.toString()); + this.frame = frame; + this.jobManagerSupport = jobManagerSupport; + } - public DispatchSatisfyDepends(FrameSearchInterface search, JobManagerSupport jobManagerSupport) { - super("disp_sat_deps_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); - this.search = search; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchSatisfyDepends(FrameSearchInterface search, + JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); + this.search = search; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (search != null) { - jobManagerSupport.satisfyWhatDependsOn(search); - } else if (frame != null) { - jobManagerSupport.satisfyWhatDependsOn(frame); - } else if (layer != null) { - jobManagerSupport.satisfyWhatDependsOn(layer); - } else { - jobManagerSupport.satisfyWhatDependsOn(job); - } - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (search != null) { + jobManagerSupport.satisfyWhatDependsOn(search); + } else if (frame != null) { + jobManagerSupport.satisfyWhatDependsOn(frame); + } else if (layer != null) { + jobManagerSupport.satisfyWhatDependsOn(layer); + } else { + jobManagerSupport.satisfyWhatDependsOn(job); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java index a106644ec..7d7de0015 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java @@ -26,23 +26,23 @@ * @category command */ public class DispatchShutdownJobIfCompleted extends KeyRunnable { - private JobInterface job; + private JobInterface job; - private JobManagerSupport jobManagerSupport; + private JobManagerSupport jobManagerSupport; - public DispatchShutdownJobIfCompleted(JobInterface job, JobManagerSupport jobManagerSupport) { - super("disp_st_job_comp_" + job.getJobId()); - this.job = job; - this.jobManagerSupport = jobManagerSupport; - } + public DispatchShutdownJobIfCompleted(JobInterface job, JobManagerSupport jobManagerSupport) { + super("disp_st_job_comp_" + job.getJobId()); + this.job = job; + this.jobManagerSupport = jobManagerSupport; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (jobManagerSupport.isJobComplete(job)) { - jobManagerSupport.shutdownJob(job, new Source("natural"), false); - } - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (jobManagerSupport.isJobComplete(job)) { + jobManagerSupport.shutdownJob(job, new Source("natural"), false); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java index 1bad4f6fe..5b2b65753 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java @@ -21,40 +21,40 @@ public class DispatchStaggerFrames extends KeyRunnable { - private JobInterface job = null; - private LayerInterface layer = null; - private String range; - private int stagger; - private JobManagerSupport jobManagerSupport; - - public DispatchStaggerFrames(JobInterface job, String range, int stagger, - JobManagerSupport jobManagerSupport) { - super("disp_stag_frames_" + job.getJobId() + "_" + range); - this.job = job; - this.range = range; - this.stagger = stagger; - this.jobManagerSupport = jobManagerSupport; - } - - public DispatchStaggerFrames(LayerInterface layer, String range, int stagger, - JobManagerSupport jobManagerSupport) { - super("disp_stag_frames_" + layer.getLayerId() + "_" + range); - this.layer = layer; - this.range = range; - this.stagger = stagger; - this.jobManagerSupport = jobManagerSupport; - } - - @Override - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - if (job != null) { - jobManagerSupport.staggerJob(job, range, stagger); - } else if (layer != null) { - jobManagerSupport.staggerJob(layer, range, stagger); - } - } - }.execute(); - } + private JobInterface job = null; + private LayerInterface layer = null; + private String range; + private int stagger; + private JobManagerSupport jobManagerSupport; + + public DispatchStaggerFrames(JobInterface job, String range, int stagger, + JobManagerSupport jobManagerSupport) { + super("disp_stag_frames_" + job.getJobId() + "_" + range); + this.job = job; + this.range = range; + this.stagger = stagger; + this.jobManagerSupport = jobManagerSupport; + } + + public DispatchStaggerFrames(LayerInterface layer, String range, int stagger, + JobManagerSupport jobManagerSupport) { + super("disp_stag_frames_" + layer.getLayerId() + "_" + range); + this.layer = layer; + this.range = range; + this.stagger = stagger; + this.jobManagerSupport = jobManagerSupport; + } + + @Override + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (job != null) { + jobManagerSupport.staggerJob(job, range, stagger); + } else if (layer != null) { + jobManagerSupport.staggerJob(layer, range, stagger); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java index dfee490cf..58461bad0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java @@ -20,17 +20,17 @@ public abstract class KeyRunnable implements Runnable { - private String key; + private String key; - public KeyRunnable(String key) { - this.key = key; - } + public KeyRunnable(String key) { + this.key = key; + } - public String getKey() { - return key; - } + public String getKey() { + return key; + } - public void setKey(String key) { - this.key = key; - } + public void setKey(String key) { + this.key = key; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java index 0bdda38ca..c14aa8757 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java @@ -23,25 +23,25 @@ import 
com.imageworks.spcue.service.HostManager; public class ManageReparentHosts extends KeyRunnable { - AllocationInterface alloc; - List hosts; - HostManager hostManager; + AllocationInterface alloc; + List hosts; + HostManager hostManager; - public ManageReparentHosts(AllocationInterface alloc, List hosts, - HostManager hostManager) { - super(alloc.getAllocationId()); - this.alloc = alloc; - this.hosts = hosts; - this.hostManager = hostManager; - } + public ManageReparentHosts(AllocationInterface alloc, List hosts, + HostManager hostManager) { + super(alloc.getAllocationId()); + this.alloc = alloc; + this.hosts = hosts; + this.hostManager = hostManager; + } - public void run() { - new DispatchCommandTemplate() { - public void wrapDispatchCommand() { - for (HostInterface host : hosts) { - hostManager.setAllocation(host, alloc); - } - } - }.execute(); - } + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + for (HostInterface host : hosts) { + hostManager.setAllocation(host, alloc); + } + } + }.execute(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java index 90522b3ff..39a9a3deb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClient.java @@ -23,78 +23,78 @@ public interface RqdClient { - /** - * Setting to true pretends all remote procedures execute perfectly. - * - * @param tests - */ - public void setTestMode(boolean tests); + /** + * Setting to true pretends all remote procedures execute perfectly. + * + * @param tests + */ + public void setTestMode(boolean tests); - /** - * Returns a RunningFrameInfo - * - * @param proc - * @return - */ - RunningFrameInfo getFrameStatus(VirtualProc proc); + /** + * Returns a RunningFrameInfo + * + * @param proc + * @return + */ + RunningFrameInfo getFrameStatus(VirtualProc proc); - /** - * Sets the host lock to the provided state. - * - * @param host - * @param lock - */ - public void setHostLock(HostInterface host, LockState lock); + /** + * Sets the host lock to the provided state. + * + * @param host + * @param lock + */ + public void setHostLock(HostInterface host, LockState lock); - /** - * Locks the host. - * - * @param host - */ - public void lockHost(HostInterface host); + /** + * Locks the host. + * + * @param host + */ + public void lockHost(HostInterface host); - /** - * Unlocks the host. - * - * @param host - */ - public void unlockHost(HostInterface host); + /** + * Unlocks the host. + * + * @param host + */ + public void unlockHost(HostInterface host); - /** - * Reboots the host now. - * - * @param host - */ - public void rebootNow(HostInterface host); + /** + * Reboots the host now. 
+ * + * @param host + */ + public void rebootNow(HostInterface host); - /** - * Reboots the host when idle - * - * @param host - */ - public void rebootWhenIdle(HostInterface host); + /** + * Reboots the host when idle + * + * @param host + */ + public void rebootWhenIdle(HostInterface host); - /** - * Attempts to launch a frame - * - * @param frame - * @param resource - * @return RunningFramePrx - */ - void launchFrame(RunFrame frame, VirtualProc proc); + /** + * Attempts to launch a frame + * + * @param frame + * @param resource + * @return RunningFramePrx + */ + void launchFrame(RunFrame frame, VirtualProc proc); - /** - * Kills a running frame by resource - * - * @param resource - */ - void killFrame(VirtualProc Proc, String message); + /** + * Kills a running frame by resource + * + * @param resource + */ + void killFrame(VirtualProc Proc, String message); - /** - * Kills a running frame - * - * @param hostName - * @param frameId - */ - void killFrame(String hostName, String frameId, String message); + /** + * Kills a running frame + * + * @param hostName + * @param frameId + */ + void killFrame(String hostName, String frameId, String message); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java index 4609b94e3..89a0a33c7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientException.java @@ -20,23 +20,23 @@ @SuppressWarnings("serial") public class RqdClientException extends SpcueRuntimeException { - public RqdClientException() { - // TODO Auto-generated constructor stub - } - - public RqdClientException(String message, Throwable cause) { - super(message, cause); - // TODO Auto-generated constructor stub - } - - public RqdClientException(String message) { - super(message); - // TODO Auto-generated constructor stub - } - - public RqdClientException(Throwable cause) { - super(cause); - // TODO Auto-generated constructor stub - } + public RqdClientException() { + // TODO Auto-generated constructor stub + } + + public RqdClientException(String message, Throwable cause) { + super(message, cause); + // TODO Auto-generated constructor stub + } + + public RqdClientException(String message) { + super(message); + // TODO Auto-generated constructor stub + } + + public RqdClientException(Throwable cause) { + super(cause); + // TODO Auto-generated constructor stub + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java index 27aad081c..5d2677598 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java @@ -48,171 +48,172 @@ import com.imageworks.spcue.grpc.rqd.RunningFrameStatusResponse; public final class RqdClientGrpc implements RqdClient { - private static final Logger logger = LogManager.getLogger(RqdClientGrpc.class); - - private final int rqdCacheSize; - private final int rqdCacheExpiration; - private final int rqdCacheConcurrency; - private final int rqdServerPort; - private final int rqdTaskDeadlineSeconds; - private LoadingCache channelCache; - - private boolean testMode = false; - - public RqdClientGrpc(int rqdServerPort, int rqdCacheSize, int rqdCacheExpiration, - int rqdCacheConcurrency, int rqdTaskDeadline) { - this.rqdServerPort = rqdServerPort; - this.rqdCacheSize = rqdCacheSize; - this.rqdCacheExpiration = 
rqdCacheExpiration; - this.rqdCacheConcurrency = rqdCacheConcurrency; - this.rqdTaskDeadlineSeconds = rqdTaskDeadline; - } - - private void buildChannelCache() { - this.channelCache = - CacheBuilder.newBuilder().maximumSize(rqdCacheSize).concurrencyLevel(rqdCacheConcurrency) - .expireAfterAccess(rqdCacheExpiration, TimeUnit.MINUTES) - .removalListener(new RemovalListener() { - @Override - public void onRemoval(RemovalNotification removal) { - ManagedChannel conn = removal.getValue(); - conn.shutdown(); - } - }).build(new CacheLoader() { - @Override - public ManagedChannel load(String host) throws Exception { - ManagedChannelBuilder channelBuilder = - ManagedChannelBuilder.forAddress(host, rqdServerPort).usePlaintext(); - return channelBuilder.build(); - } - }); - } - - private RqdInterfaceGrpc.RqdInterfaceBlockingStub getStub(String host) throws ExecutionException { - if (channelCache == null) { - buildChannelCache(); + private static final Logger logger = LogManager.getLogger(RqdClientGrpc.class); + + private final int rqdCacheSize; + private final int rqdCacheExpiration; + private final int rqdCacheConcurrency; + private final int rqdServerPort; + private final int rqdTaskDeadlineSeconds; + private LoadingCache channelCache; + + private boolean testMode = false; + + public RqdClientGrpc(int rqdServerPort, int rqdCacheSize, int rqdCacheExpiration, + int rqdCacheConcurrency, int rqdTaskDeadline) { + this.rqdServerPort = rqdServerPort; + this.rqdCacheSize = rqdCacheSize; + this.rqdCacheExpiration = rqdCacheExpiration; + this.rqdCacheConcurrency = rqdCacheConcurrency; + this.rqdTaskDeadlineSeconds = rqdTaskDeadline; } - ManagedChannel channel = channelCache.get(host); - return RqdInterfaceGrpc.newBlockingStub(channel).withDeadlineAfter(rqdTaskDeadlineSeconds, - TimeUnit.SECONDS); - } - - private RunningFrameGrpc.RunningFrameBlockingStub getRunningFrameStub(String host) - throws ExecutionException { - if (channelCache == null) { - buildChannelCache(); + + private void buildChannelCache() { + this.channelCache = CacheBuilder.newBuilder().maximumSize(rqdCacheSize) + .concurrencyLevel(rqdCacheConcurrency) + .expireAfterAccess(rqdCacheExpiration, TimeUnit.MINUTES) + .removalListener(new RemovalListener() { + @Override + public void onRemoval(RemovalNotification removal) { + ManagedChannel conn = removal.getValue(); + conn.shutdown(); + } + }).build(new CacheLoader() { + @Override + public ManagedChannel load(String host) throws Exception { + ManagedChannelBuilder channelBuilder = ManagedChannelBuilder + .forAddress(host, rqdServerPort).usePlaintext(); + return channelBuilder.build(); + } + }); } - ManagedChannel channel = channelCache.get(host); - return RunningFrameGrpc.newBlockingStub(channel).withDeadlineAfter(rqdTaskDeadlineSeconds, - TimeUnit.SECONDS); - } - - public void setHostLock(HostInterface host, LockState lock) { - if (lock == LockState.OPEN) { - logger.debug("Unlocking RQD host"); - unlockHost(host); - } else if (lock == LockState.LOCKED) { - logger.debug("Locking RQD host"); - lockHost(host); - } else { - logger.debug("Unknown LockState passed to setHostLock."); + + private RqdInterfaceGrpc.RqdInterfaceBlockingStub getStub(String host) + throws ExecutionException { + if (channelCache == null) { + buildChannelCache(); + } + ManagedChannel channel = channelCache.get(host); + return RqdInterfaceGrpc.newBlockingStub(channel).withDeadlineAfter(rqdTaskDeadlineSeconds, + TimeUnit.SECONDS); } - } - public void lockHost(HostInterface host) { - RqdStaticLockAllRequest request = 
RqdStaticLockAllRequest.newBuilder().build(); + private RunningFrameGrpc.RunningFrameBlockingStub getRunningFrameStub(String host) + throws ExecutionException { + if (channelCache == null) { + buildChannelCache(); + } + ManagedChannel channel = channelCache.get(host); + return RunningFrameGrpc.newBlockingStub(channel).withDeadlineAfter(rqdTaskDeadlineSeconds, + TimeUnit.SECONDS); + } - try { - getStub(host.getName()).lockAll(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to lock host: " + host.getName(), e); + public void setHostLock(HostInterface host, LockState lock) { + if (lock == LockState.OPEN) { + logger.debug("Unlocking RQD host"); + unlockHost(host); + } else if (lock == LockState.LOCKED) { + logger.debug("Locking RQD host"); + lockHost(host); + } else { + logger.debug("Unknown LockState passed to setHostLock."); + } } - } - public void unlockHost(HostInterface host) { - RqdStaticUnlockAllRequest request = RqdStaticUnlockAllRequest.newBuilder().build(); + public void lockHost(HostInterface host) { + RqdStaticLockAllRequest request = RqdStaticLockAllRequest.newBuilder().build(); - try { - getStub(host.getName()).unlockAll(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to unlock host: " + host.getName(), e); + try { + getStub(host.getName()).lockAll(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to lock host: " + host.getName(), e); + } } - } - public void rebootNow(HostInterface host) { - RqdStaticRebootNowRequest request = RqdStaticRebootNowRequest.newBuilder().build(); + public void unlockHost(HostInterface host) { + RqdStaticUnlockAllRequest request = RqdStaticUnlockAllRequest.newBuilder().build(); - try { - getStub(host.getName()).rebootNow(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to reboot host: " + host.getName(), e); + try { + getStub(host.getName()).unlockAll(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to unlock host: " + host.getName(), e); + } } - } - public void rebootWhenIdle(HostInterface host) { - RqdStaticRebootIdleRequest request = RqdStaticRebootIdleRequest.newBuilder().build(); + public void rebootNow(HostInterface host) { + RqdStaticRebootNowRequest request = RqdStaticRebootNowRequest.newBuilder().build(); - if (testMode) { - return; + try { + getStub(host.getName()).rebootNow(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to reboot host: " + host.getName(), e); + } } - try { - getStub(host.getName()).rebootIdle(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to reboot host: " + host.getName(), e); - } - } + public void rebootWhenIdle(HostInterface host) { + RqdStaticRebootIdleRequest request = RqdStaticRebootIdleRequest.newBuilder().build(); - public void killFrame(VirtualProc proc, String message) { - killFrame(proc.hostName, proc.frameId, message); - } + if (testMode) { + return; + } - public void killFrame(String host, String frameId, String message) { - RqdStaticKillRunningFrameRequest request = RqdStaticKillRunningFrameRequest.newBuilder() - .setFrameId(frameId).setMessage(message).build(); + try { + getStub(host.getName()).rebootIdle(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new 
RqdClientException("failed to reboot host: " + host.getName(), e); + } + } - if (testMode) { - return; + public void killFrame(VirtualProc proc, String message) { + killFrame(proc.hostName, proc.frameId, message); } - try { - logger.info("killing frame on " + host + ", source: " + message); - getStub(host).killRunningFrame(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to kill frame " + frameId, e); + public void killFrame(String host, String frameId, String message) { + RqdStaticKillRunningFrameRequest request = RqdStaticKillRunningFrameRequest.newBuilder() + .setFrameId(frameId).setMessage(message).build(); + + if (testMode) { + return; + } + + try { + logger.info("killing frame on " + host + ", source: " + message); + getStub(host).killRunningFrame(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to kill frame " + frameId, e); + } } - } - - public RunningFrameInfo getFrameStatus(VirtualProc proc) { - try { - RqdStaticGetRunFrameResponse getRunFrameResponse = getStub(proc.hostName) - .getRunFrame(RqdStaticGetRunFrameRequest.newBuilder().setFrameId(proc.frameId).build()); - RunningFrameStatusResponse frameStatusResponse = - getRunningFrameStub(proc.hostName).status(RunningFrameStatusRequest.newBuilder() - .setRunFrame(getRunFrameResponse.getRunFrame()).build()); - return frameStatusResponse.getRunningFrameInfo(); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to obtain status for frame " + proc.frameId, e); + + public RunningFrameInfo getFrameStatus(VirtualProc proc) { + try { + RqdStaticGetRunFrameResponse getRunFrameResponse = getStub(proc.hostName).getRunFrame( + RqdStaticGetRunFrameRequest.newBuilder().setFrameId(proc.frameId).build()); + RunningFrameStatusResponse frameStatusResponse = + getRunningFrameStub(proc.hostName).status(RunningFrameStatusRequest.newBuilder() + .setRunFrame(getRunFrameResponse.getRunFrame()).build()); + return frameStatusResponse.getRunningFrameInfo(); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to obtain status for frame " + proc.frameId, e); + } } - } - public void launchFrame(final RunFrame frame, final VirtualProc proc) { - RqdStaticLaunchFrameRequest request = - RqdStaticLaunchFrameRequest.newBuilder().setRunFrame(frame).build(); + public void launchFrame(final RunFrame frame, final VirtualProc proc) { + RqdStaticLaunchFrameRequest request = + RqdStaticLaunchFrameRequest.newBuilder().setRunFrame(frame).build(); - if (testMode) { - return; - } + if (testMode) { + return; + } - try { - getStub(proc.hostName).launchFrame(request); - } catch (StatusRuntimeException | ExecutionException e) { - throw new RqdClientException("failed to launch frame", e); + try { + getStub(proc.hostName).launchFrame(request); + } catch (StatusRuntimeException | ExecutionException e) { + throw new RqdClientException("failed to launch frame", e); + } } - } - @Override - public void setTestMode(boolean testMode) { - this.testMode = testMode; - } + @Override + public void setTestMode(boolean testMode) { + this.testMode = testMode; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java b/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java index 68d773301..480de83d1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java @@ 
-29,121 +29,123 @@ public class CueStatic extends CueInterfaceGrpc.CueInterfaceImplBase { - private Whiteboard whiteboard; - private DispatchQueue manageQueue; - private DispatchQueue dispatchQueue; - private HostReportQueue reportQueue; - private BookingQueue bookingQueue; - private DispatchSupport dispatchSupport; - - @Override - public void getSystemStats(CueGetSystemStatsRequest request, - StreamObserver responseObserver) { - SystemStats stats = SystemStats.newBuilder().setDispatchThreads(dispatchQueue.getActiveCount()) - .setDispatchWaiting(dispatchQueue.getSize()) - .setDispatchRemainingCapacity(dispatchQueue.getRemainingCapacity()) - .setDispatchExecuted(dispatchQueue.getCompletedTaskCount()) - .setDispatchRejected(dispatchQueue.getRejectedTaskCount()) - - .setManageThreads(manageQueue.getActiveCount()).setManageWaiting(manageQueue.getSize()) - .setManageRemainingCapacity(manageQueue.getRemainingCapacity()) - .setManageExecuted(manageQueue.getCompletedTaskCount()) - .setManageRejected(manageQueue.getRejectedTaskCount()) - - .setReportThreads(reportQueue.getActiveCount()) - .setReportWaiting(reportQueue.getQueue().size()) - .setReportRemainingCapacity(reportQueue.getQueue().remainingCapacity()) - .setReportExecuted(reportQueue.getTaskCount()) - .setReportRejected(reportQueue.getRejectedTaskCount()) - - .setBookingWaiting(bookingQueue.getSize()) - .setBookingRemainingCapacity(bookingQueue.getRemainingCapacity()) - .setBookingThreads(bookingQueue.getActiveCount()) - .setBookingExecuted(bookingQueue.getCompletedTaskCount()) - .setBookingRejected(bookingQueue.getRejectedTaskCount()).setBookingSleepMillis(0) - - .setHostBalanceSuccess(DispatchSupport.balanceSuccess.get()) - .setHostBalanceFailed(DispatchSupport.balanceFailed.get()) - .setKilledOffenderProcs(DispatchSupport.killedOffenderProcs.get()) - .setKilledOomProcs(DispatchSupport.killedOomProcs.get()) - .setClearedProcs(DispatchSupport.clearedProcs.get()) - .setBookingRetries(DispatchSupport.bookingRetries.get()) - .setBookingErrors(DispatchSupport.bookingErrors.get()) - .setBookedProcs(DispatchSupport.bookedProcs.get()) - - // TODO(gregdenton) Reimplement these with gRPC. 
(Issue #69) - // .setReqForData(IceServer.dataRequests.get()) - // .setReqForFunction(IceServer.rpcRequests.get()) - // .setReqErrors(IceServer.errors.get()) - - .setUnbookedProcs(DispatchSupport.unbookedProcs.get()) - .setPickedUpCores(DispatchSupport.pickedUpCoresCount.get()) - .setStrandedCores(DispatchSupport.strandedCoresCount.get()).build(); - responseObserver.onNext(CueGetSystemStatsResponse.newBuilder().setStats(stats).build()); - responseObserver.onCompleted(); - } - - public boolean isDispatchQueueHealthy() { - return this.dispatchQueue.isHealthy(); - } - - public boolean isManageQueueHealthy() { - return this.manageQueue.isHealthy(); - } - - public boolean isReportQueueHealthy() { - return this.reportQueue.isHealthy(); - } - - public boolean isBookingQueueHealthy() { - return this.bookingQueue.isHealthy(); - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public DispatchQueue getDispatchQueue() { - return dispatchQueue; - } - - public void setDispatchQueue(DispatchQueue dispatchQueue) { - this.dispatchQueue = dispatchQueue; - } - - public HostReportQueue getReportQueue() { - return reportQueue; - } - - public void setReportQueue(HostReportQueue reportQueue) { - this.reportQueue = reportQueue; - } - - public BookingQueue getBookingQueue() { - return bookingQueue; - } - - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } - - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } - - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + private Whiteboard whiteboard; + private DispatchQueue manageQueue; + private DispatchQueue dispatchQueue; + private HostReportQueue reportQueue; + private BookingQueue bookingQueue; + private DispatchSupport dispatchSupport; + + @Override + public void getSystemStats(CueGetSystemStatsRequest request, + StreamObserver responseObserver) { + SystemStats stats = SystemStats.newBuilder() + .setDispatchThreads(dispatchQueue.getActiveCount()) + .setDispatchWaiting(dispatchQueue.getSize()) + .setDispatchRemainingCapacity(dispatchQueue.getRemainingCapacity()) + .setDispatchExecuted(dispatchQueue.getCompletedTaskCount()) + .setDispatchRejected(dispatchQueue.getRejectedTaskCount()) + + .setManageThreads(manageQueue.getActiveCount()) + .setManageWaiting(manageQueue.getSize()) + .setManageRemainingCapacity(manageQueue.getRemainingCapacity()) + .setManageExecuted(manageQueue.getCompletedTaskCount()) + .setManageRejected(manageQueue.getRejectedTaskCount()) + + .setReportThreads(reportQueue.getActiveCount()) + .setReportWaiting(reportQueue.getQueue().size()) + .setReportRemainingCapacity(reportQueue.getQueue().remainingCapacity()) + .setReportExecuted(reportQueue.getTaskCount()) + .setReportRejected(reportQueue.getRejectedTaskCount()) + + .setBookingWaiting(bookingQueue.getSize()) + .setBookingRemainingCapacity(bookingQueue.getRemainingCapacity()) + .setBookingThreads(bookingQueue.getActiveCount()) + .setBookingExecuted(bookingQueue.getCompletedTaskCount()) + .setBookingRejected(bookingQueue.getRejectedTaskCount()).setBookingSleepMillis(0) + + .setHostBalanceSuccess(DispatchSupport.balanceSuccess.get()) + 
.setHostBalanceFailed(DispatchSupport.balanceFailed.get()) + .setKilledOffenderProcs(DispatchSupport.killedOffenderProcs.get()) + .setKilledOomProcs(DispatchSupport.killedOomProcs.get()) + .setClearedProcs(DispatchSupport.clearedProcs.get()) + .setBookingRetries(DispatchSupport.bookingRetries.get()) + .setBookingErrors(DispatchSupport.bookingErrors.get()) + .setBookedProcs(DispatchSupport.bookedProcs.get()) + + // TODO(gregdenton) Reimplement these with gRPC. (Issue #69) + // .setReqForData(IceServer.dataRequests.get()) + // .setReqForFunction(IceServer.rpcRequests.get()) + // .setReqErrors(IceServer.errors.get()) + + .setUnbookedProcs(DispatchSupport.unbookedProcs.get()) + .setPickedUpCores(DispatchSupport.pickedUpCoresCount.get()) + .setStrandedCores(DispatchSupport.strandedCoresCount.get()).build(); + responseObserver.onNext(CueGetSystemStatsResponse.newBuilder().setStats(stats).build()); + responseObserver.onCompleted(); + } + + public boolean isDispatchQueueHealthy() { + return this.dispatchQueue.isHealthy(); + } + + public boolean isManageQueueHealthy() { + return this.manageQueue.isHealthy(); + } + + public boolean isReportQueueHealthy() { + return this.reportQueue.isHealthy(); + } + + public boolean isBookingQueueHealthy() { + return this.bookingQueue.isHealthy(); + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public DispatchQueue getDispatchQueue() { + return dispatchQueue; + } + + public void setDispatchQueue(DispatchQueue dispatchQueue) { + this.dispatchQueue = dispatchQueue; + } + + public HostReportQueue getReportQueue() { + return reportQueue; + } + + public void setReportQueue(HostReportQueue reportQueue) { + this.reportQueue = reportQueue; + } + + public BookingQueue getBookingQueue() { + return bookingQueue; + } + + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java index caca2612d..05e0ba8f0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAction.java @@ -35,61 +35,62 @@ public class ManageAction extends ActionInterfaceGrpc.ActionInterfaceImplBase { - private FilterManager filterManager; - private Whiteboard whiteboard; + private FilterManager filterManager; + private Whiteboard whiteboard; - @Override - public void delete(ActionDeleteRequest request, - StreamObserver responseObserver) { - Action requestAction = request.getAction(); - ActionEntity existingAction = filterManager.getAction(requestAction.getId()); - FilterEntity filterEntity = filterManager.getFilter(existingAction); - ActionEntity actionToDelete = - ActionEntity.build(filterEntity, requestAction, requestAction.getId()); - filterManager.deleteAction(actionToDelete); - responseObserver.onNext(ActionDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(ActionDeleteRequest request, + StreamObserver 
responseObserver) { + Action requestAction = request.getAction(); + ActionEntity existingAction = filterManager.getAction(requestAction.getId()); + FilterEntity filterEntity = filterManager.getFilter(existingAction); + ActionEntity actionToDelete = + ActionEntity.build(filterEntity, requestAction, requestAction.getId()); + filterManager.deleteAction(actionToDelete); + responseObserver.onNext(ActionDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void getParentFilter(ActionGetParentFilterRequest request, - StreamObserver responseObserver) { - Filter filter = whiteboard.getFilter(ActionEntity.build(request.getAction())); - responseObserver.onNext(ActionGetParentFilterResponse.newBuilder().setFilter(filter).build()); - responseObserver.onCompleted(); - } + @Override + public void getParentFilter(ActionGetParentFilterRequest request, + StreamObserver responseObserver) { + Filter filter = whiteboard.getFilter(ActionEntity.build(request.getAction())); + responseObserver + .onNext(ActionGetParentFilterResponse.newBuilder().setFilter(filter).build()); + responseObserver.onCompleted(); + } - @Override - public void commit(ActionCommitRequest request, - StreamObserver responseObserver) { - Action requestAction = request.getAction(); - // Getting an action to have filterId populated from the DB - try { - ActionEntity persistedAction = filterManager.getAction(requestAction.getId()); - ActionEntity newAction = - ActionEntity.build(persistedAction, requestAction, requestAction.getId()); - filterManager.updateAction(newAction); - responseObserver.onNext(ActionCommitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - throw new SpcueRuntimeException( - "Invalid actionId on Action commit: " + requestAction.getId()); + @Override + public void commit(ActionCommitRequest request, + StreamObserver responseObserver) { + Action requestAction = request.getAction(); + // Getting an action to have filterId populated from the DB + try { + ActionEntity persistedAction = filterManager.getAction(requestAction.getId()); + ActionEntity newAction = + ActionEntity.build(persistedAction, requestAction, requestAction.getId()); + filterManager.updateAction(newAction); + responseObserver.onNext(ActionCommitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + throw new SpcueRuntimeException( + "Invalid actionId on Action commit: " + requestAction.getId()); + } } - } - public FilterManager getFilterManager() { - return filterManager; - } + public FilterManager getFilterManager() { + return filterManager; + } - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } - public Whiteboard getWhiteboard() { - return whiteboard; - } + public Whiteboard getWhiteboard() { + return whiteboard; + } - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java index 71c0af17b..ab8881887 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java @@ 
-52,225 +52,225 @@ import com.imageworks.spcue.util.CueUtil; public class ManageAllocation extends AllocationInterfaceGrpc.AllocationInterfaceImplBase { - private AllocationDao allocationDao; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - private AdminManager adminManager; - private HostManager hostManager; - private HostSearchFactory hostSearchFactory; - - public ManageAllocation() {} - - @Override - public void create(AllocCreateRequest request, - StreamObserver responseObserver) { - String new_name = request.getName(); - // If they pass name in the format ., just remove the facility. - if (CueUtil.verifyAllocationNameFormat(request.getName())) { - new_name = CueUtil.splitAllocationName(request.getName())[1]; + private AllocationDao allocationDao; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + private AdminManager adminManager; + private HostManager hostManager; + private HostSearchFactory hostSearchFactory; + + public ManageAllocation() {} + + @Override + public void create(AllocCreateRequest request, + StreamObserver responseObserver) { + String new_name = request.getName(); + // If they pass name in the format ., just remove the facility. + if (CueUtil.verifyAllocationNameFormat(request.getName())) { + new_name = CueUtil.splitAllocationName(request.getName())[1]; + } + + AllocationEntity detail = new AllocationEntity(); + detail.name = new_name; + detail.tag = request.getTag(); + adminManager.createAllocation(adminManager.getFacility(request.getFacility().getName()), + detail); + + responseObserver.onNext(AllocCreateResponse.newBuilder() + .setAllocation(whiteboard.getAllocation(detail.id)).build()); + responseObserver.onCompleted(); } - AllocationEntity detail = new AllocationEntity(); - detail.name = new_name; - detail.tag = request.getTag(); - adminManager.createAllocation(adminManager.getFacility(request.getFacility().getName()), - detail); - - responseObserver.onNext(AllocCreateResponse.newBuilder() - .setAllocation(whiteboard.getAllocation(detail.id)).build()); - responseObserver.onCompleted(); - } - - @Override - public void getAll(AllocGetAllRequest request, - StreamObserver responseObserver) { - responseObserver.onNext( - AllocGetAllResponse.newBuilder().setAllocations(whiteboard.getAllocations()).build()); - responseObserver.onCompleted(); - } - - @Override - public void find(AllocFindRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(AllocFindResponse.newBuilder() - .setAllocation(whiteboard.findAllocation(request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + @Override + public void getAll(AllocGetAllRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(AllocGetAllResponse.newBuilder() + .setAllocations(whiteboard.getAllocations()).build()); + responseObserver.onCompleted(); } - } - - @Override - public void get(AllocGetRequest request, StreamObserver responseObserver) { - try { - responseObserver.onNext(AllocGetResponse.newBuilder() - .setAllocation(whiteboard.findAllocation(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + + @Override + public void find(AllocFindRequest request, StreamObserver responseObserver) { + try 
{ + responseObserver.onNext(AllocFindResponse.newBuilder() + .setAllocation(whiteboard.findAllocation(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void get(AllocGetRequest request, StreamObserver responseObserver) { + try { + responseObserver.onNext(AllocGetResponse.newBuilder() + .setAllocation(whiteboard.findAllocation(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + private AllocationEntity findAllocationDetail(String facility, String name) { + // If they pass name in the format ., just remove the facility. + if (CueUtil.verifyAllocationNameFormat(name)) { + name = CueUtil.splitAllocationName(name)[1]; + } + return adminManager.findAllocationDetail(facility, name); + } + + @Override + public void delete(AllocDeleteRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.deleteAllocation(alloc); + responseObserver.onNext(AllocDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void findHosts(AllocFindHostsRequest request, + StreamObserver responseObserver) { + HostSearchCriteria searchCriteria = + request.getR().toBuilder().addAllocs(request.getAllocation().getId()).build(); + responseObserver.onNext(AllocFindHostsResponse.newBuilder() + .setHosts(whiteboard.getHosts(hostSearchFactory.create(searchCriteria))).build()); + responseObserver.onCompleted(); + } + + @Override + public void getHosts(AllocGetHostsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(AllocGetHostsResponse.newBuilder() + .setHosts(whiteboard.getHosts( + hostSearchFactory.create(toAllocationEntity(request.getAllocation())))) + .build()); + responseObserver.onCompleted(); + } + + @Override + public void getSubscriptions(AllocGetSubscriptionsRequest request, + StreamObserver responseObserver) { + AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); + responseObserver.onNext(AllocGetSubscriptionsResponse.newBuilder() + .setSubscriptions(whiteboard.getSubscriptions(allocEntity)).build()); + responseObserver.onCompleted(); + } + + @Override + public void reparentHosts(AllocReparentHostsRequest request, + StreamObserver responseObserver) { + AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); + List hosts = request.getHosts().getHostsList(); + List hostEntities = + hosts.stream().map(HostEntity::new).collect(Collectors.toList()); + manageQueue.execute(new ManageReparentHosts(allocEntity, hostEntities, hostManager)); + responseObserver.onNext(AllocReparentHostsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setBillable(AllocSetBillableRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setAllocationBillable(alloc, request.getValue()); + responseObserver.onNext(AllocSetBillableResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void 
setName(AllocSetNameRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setAllocationName(alloc, request.getName()); + responseObserver.onNext(AllocSetNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setTag(AllocSetTagRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setAllocationTag(alloc, request.getTag()); + responseObserver.onNext(AllocSetTagResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getDefault(AllocGetDefaultRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = adminManager.getDefaultAllocation(); + responseObserver.onNext(AllocGetDefaultResponse.newBuilder() + .setAllocation(whiteboard.getAllocation(alloc.id)).build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefault(AllocSetDefaultRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), + request.getAllocation().getName()); + adminManager.setDefaultAllocation(alloc); + responseObserver.onNext(AllocSetDefaultResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public AllocationDao getAllocationDao() { + return allocationDao; + } + + public void setAllocationDao(AllocationDao allocationDao) { + this.allocationDao = allocationDao; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { + this.hostSearchFactory = hostSearchFactory; } - } - private AllocationEntity findAllocationDetail(String facility, String name) { - // If they pass name in the format ., just remove the facility. 
- if (CueUtil.verifyAllocationNameFormat(name)) { - name = CueUtil.splitAllocationName(name)[1]; + private AllocationEntity toAllocationEntity(Allocation allocGrpc) { + AllocationEntity allocEntity = new AllocationEntity(); + allocEntity.id = allocGrpc.getId(); + allocEntity.name = allocGrpc.getName(); + allocEntity.tag = allocGrpc.getTag(); + allocEntity.facilityId = allocGrpc.getFacility(); + return allocEntity; } - return adminManager.findAllocationDetail(facility, name); - } - - @Override - public void delete(AllocDeleteRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), - request.getAllocation().getName()); - adminManager.deleteAllocation(alloc); - responseObserver.onNext(AllocDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void findHosts(AllocFindHostsRequest request, - StreamObserver responseObserver) { - HostSearchCriteria searchCriteria = - request.getR().toBuilder().addAllocs(request.getAllocation().getId()).build(); - responseObserver.onNext(AllocFindHostsResponse.newBuilder() - .setHosts(whiteboard.getHosts(hostSearchFactory.create(searchCriteria))).build()); - responseObserver.onCompleted(); - } - - @Override - public void getHosts(AllocGetHostsRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(AllocGetHostsResponse.newBuilder() - .setHosts(whiteboard - .getHosts(hostSearchFactory.create(toAllocationEntity(request.getAllocation())))) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void getSubscriptions(AllocGetSubscriptionsRequest request, - StreamObserver responseObserver) { - AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); - responseObserver.onNext(AllocGetSubscriptionsResponse.newBuilder() - .setSubscriptions(whiteboard.getSubscriptions(allocEntity)).build()); - responseObserver.onCompleted(); - } - - @Override - public void reparentHosts(AllocReparentHostsRequest request, - StreamObserver responseObserver) { - AllocationEntity allocEntity = toAllocationEntity(request.getAllocation()); - List hosts = request.getHosts().getHostsList(); - List hostEntities = - hosts.stream().map(HostEntity::new).collect(Collectors.toList()); - manageQueue.execute(new ManageReparentHosts(allocEntity, hostEntities, hostManager)); - responseObserver.onNext(AllocReparentHostsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setBillable(AllocSetBillableRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), - request.getAllocation().getName()); - adminManager.setAllocationBillable(alloc, request.getValue()); - responseObserver.onNext(AllocSetBillableResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setName(AllocSetNameRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), - request.getAllocation().getName()); - adminManager.setAllocationName(alloc, request.getName()); - responseObserver.onNext(AllocSetNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setTag(AllocSetTagRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), - request.getAllocation().getName()); - 
adminManager.setAllocationTag(alloc, request.getTag()); - responseObserver.onNext(AllocSetTagResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getDefault(AllocGetDefaultRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = adminManager.getDefaultAllocation(); - responseObserver.onNext(AllocGetDefaultResponse.newBuilder() - .setAllocation(whiteboard.getAllocation(alloc.id)).build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefault(AllocSetDefaultRequest request, - StreamObserver responseObserver) { - AllocationEntity alloc = findAllocationDetail(request.getAllocation().getFacility(), - request.getAllocation().getName()); - adminManager.setDefaultAllocation(alloc); - responseObserver.onNext(AllocSetDefaultResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public AllocationDao getAllocationDao() { - return allocationDao; - } - - public void setAllocationDao(AllocationDao allocationDao) { - this.allocationDao = allocationDao; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { - this.hostSearchFactory = hostSearchFactory; - } - - private AllocationEntity toAllocationEntity(Allocation allocGrpc) { - AllocationEntity allocEntity = new AllocationEntity(); - allocEntity.id = allocGrpc.getId(); - allocEntity.name = allocGrpc.getName(); - allocEntity.tag = allocGrpc.getTag(); - allocEntity.facilityId = allocGrpc.getFacility(); - return allocEntity; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java index 839fde67c..d0db7a52d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageComment.java @@ -27,34 +27,34 @@ public class ManageComment extends CommentInterfaceGrpc.CommentInterfaceImplBase { - private CommentManager commentManager; - - @Override - public void delete(CommentDeleteRequest request, - StreamObserver responseObserver) { - commentManager.deleteComment(request.getComment().getId()); - responseObserver.onNext(CommentDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void save(CommentSaveRequest request, - StreamObserver responseObserver) { - CommentDetail c = new CommentDetail(); - c.id = request.getComment().getId(); - c.message = request.getComment().getMessage(); - c.subject = request.getComment().getSubject(); - commentManager.saveComment(c); - CommentSaveResponse response = CommentSaveResponse.newBuilder().build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - public CommentManager getCommentManager() { - return commentManager; - } - - public void setCommentManager(CommentManager commentManager) { - 
this.commentManager = commentManager; - } + private CommentManager commentManager; + + @Override + public void delete(CommentDeleteRequest request, + StreamObserver responseObserver) { + commentManager.deleteComment(request.getComment().getId()); + responseObserver.onNext(CommentDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void save(CommentSaveRequest request, + StreamObserver responseObserver) { + CommentDetail c = new CommentDetail(); + c.id = request.getComment().getId(); + c.message = request.getComment().getMessage(); + c.subject = request.getComment().getSubject(); + commentManager.saveComment(c); + CommentSaveResponse response = CommentSaveResponse.newBuilder().build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + public CommentManager getCommentManager() { + return commentManager; + } + + public void setCommentManager(CommentManager commentManager) { + this.commentManager = commentManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java index 258a7a7b7..8ad63908e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java @@ -33,55 +33,55 @@ public class ManageDeed extends DeedInterfaceGrpc.DeedInterfaceImplBase { - private OwnerManager ownerManager; - private Whiteboard whiteboard; + private OwnerManager ownerManager; + private Whiteboard whiteboard; - @Override - public void delete(DeedDeleteRequest request, - StreamObserver responseObserver) { - ownerManager.removeDeed(toEntity(request.getDeed())); - responseObserver.onNext(DeedDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(DeedDeleteRequest request, + StreamObserver responseObserver) { + ownerManager.removeDeed(toEntity(request.getDeed())); + responseObserver.onNext(DeedDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void getHost(DeedGetHostRequest request, - StreamObserver responseObserver) { - Host host = whiteboard.getHost(toEntity(request.getDeed())); - responseObserver.onNext(DeedGetHostResponse.newBuilder().setHost(host).build()); - responseObserver.onCompleted(); - } + @Override + public void getHost(DeedGetHostRequest request, + StreamObserver responseObserver) { + Host host = whiteboard.getHost(toEntity(request.getDeed())); + responseObserver.onNext(DeedGetHostResponse.newBuilder().setHost(host).build()); + responseObserver.onCompleted(); + } - @Override - public void getOwner(DeedGetOwnerRequest request, - StreamObserver responseObserver) { - Owner owner = whiteboard.getOwner(toEntity(request.getDeed())); - responseObserver.onNext(DeedGetOwnerResponse.newBuilder().setOwner(owner).build()); - responseObserver.onCompleted(); - } + @Override + public void getOwner(DeedGetOwnerRequest request, + StreamObserver responseObserver) { + Owner owner = whiteboard.getOwner(toEntity(request.getDeed())); + responseObserver.onNext(DeedGetOwnerResponse.newBuilder().setOwner(owner).build()); + responseObserver.onCompleted(); + } - public OwnerManager getOwnerManager() { - return ownerManager; - } + public OwnerManager getOwnerManager() { + return ownerManager; + } - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = 
ownerManager; + } - public Whiteboard getWhiteboard() { - return whiteboard; - } + public Whiteboard getWhiteboard() { + return whiteboard; + } - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } - private DeedEntity toEntity(Deed deed) { - DeedEntity entity = new DeedEntity(); - entity.id = deed.getId(); - entity.host = deed.getHost(); - entity.owner = deed.getOwner(); - entity.show = deed.getShow(); - return entity; - } + private DeedEntity toEntity(Deed deed) { + DeedEntity entity = new DeedEntity(); + entity.id = deed.getId(); + entity.host = deed.getHost(); + entity.owner = deed.getOwner(); + entity.show = deed.getShow(); + return entity; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java index 610639019..42936b9fd 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepartment.java @@ -56,165 +56,169 @@ public class ManageDepartment extends DepartmentInterfaceGrpc.DepartmentInterfaceImplBase { - private AdminManager adminManager; - private DepartmentManager departmentManager; - private Whiteboard whiteboard; - - private TaskSeq.Builder addTasksToDepartment(Map tmap, PointDetail deptConfig) { - TaskSeq.Builder builder = TaskSeq.newBuilder(); - for (Map.Entry e : tmap.entrySet()) { - TaskEntity t = new TaskEntity(deptConfig, e.getKey(), e.getValue()); - departmentManager.createTask(t); - builder.addTasks(toTask(t)); - } - return builder; - } - - @Override - public void addDepartmentName(DeptAddDeptNameRequest request, - StreamObserver responseObserver) { - adminManager.createDepartment(request.getName()); - responseObserver.onNext(DeptAddDeptNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addTask(DeptAddTaskRequest request, - StreamObserver responseObserver) { - TaskEntity t = - new TaskEntity(departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()), - request.getShot(), Convert.coresToCoreUnits(request.getMinCores())); - departmentManager.createTask(t); - Task createdTask = toTask(t); - responseObserver.onNext(DeptAddTaskResponse.newBuilder().setTask(createdTask).build()); - responseObserver.onCompleted(); - } - - @Override - public void addTasks(DeptAddTasksRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); - responseObserver.onNext(DeptAddTasksResponse.newBuilder().setTasks(builder.build()).build()); - responseObserver.onCompleted(); - } - - @Override - public void clearTasks(DeptClearTasksRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.clearTasks(deptConfig); - responseObserver.onNext(DeptClearTasksResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void clearTaskAdjustments(DeptClearTaskAdjustmentsRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.clearTaskAdjustments(deptConfig); - 
responseObserver.onNext(DeptClearTaskAdjustmentsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void disableTiManaged(DeptDisableTiManagedRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.disableTiManaged(deptConfig); - responseObserver.onNext(DeptDisableTiManagedResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void enableTiManaged(DeptEnableTiManagedRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.enableTiManaged(deptConfig, request.getTiTask(), - Convert.coresToWholeCoreUnits(request.getManagedCores())); - responseObserver.onNext(DeptEnableTiManagedResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getDepartmentNames(DeptGetDepartmentNamesRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(DeptGetDepartmentNamesResponse.newBuilder() - .addAllNames(whiteboard.getDepartmentNames()).build()); - responseObserver.onCompleted(); - } - - @Override - public void getTasks(DeptGetTasksRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - TaskSeq tasks = whiteboard.getTasks(deptConfig, deptConfig); - TaskSeq taskSeq = TaskSeq.newBuilder().addAllTasks(tasks.getTasksList()).build(); - responseObserver.onNext(DeptGetTasksResponse.newBuilder().setTasks(taskSeq).build()); - responseObserver.onCompleted(); - } - - @Override - public void removeDepartmentName(DeptRemoveDepartmentNameRequest request, - StreamObserver responseObserver) { - adminManager.removeDepartment(adminManager.findDepartment(request.getName())); - responseObserver.onNext(DeptRemoveDepartmentNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void replaceTasks(DeptReplaceTaskRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.clearTasks(deptConfig); - TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); - responseObserver.onNext(DeptReplaceTaskResponse.newBuilder().setTasks(builder.build()).build()); - responseObserver.onCompleted(); - } - - public void setManagedCores(DeptSetManagedCoresRequest request, - StreamObserver responseObserver) { - PointDetail deptConfig = - departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); - departmentManager.setManagedCores(deptConfig, - Convert.coresToWholeCoreUnits(request.getManagedCores())); - responseObserver.onNext(DeptSetManagedCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private Task 
toTask(TaskEntity detail) { - return Task.newBuilder().setId(detail.id).setName(detail.name).setShot(detail.shot) - .setDept(detail.deptId).setMinCores(detail.minCoreUnits).setPointId(detail.pointId).build(); - } + private AdminManager adminManager; + private DepartmentManager departmentManager; + private Whiteboard whiteboard; + + private TaskSeq.Builder addTasksToDepartment(Map tmap, + PointDetail deptConfig) { + TaskSeq.Builder builder = TaskSeq.newBuilder(); + for (Map.Entry e : tmap.entrySet()) { + TaskEntity t = new TaskEntity(deptConfig, e.getKey(), e.getValue()); + departmentManager.createTask(t); + builder.addTasks(toTask(t)); + } + return builder; + } + + @Override + public void addDepartmentName(DeptAddDeptNameRequest request, + StreamObserver responseObserver) { + adminManager.createDepartment(request.getName()); + responseObserver.onNext(DeptAddDeptNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addTask(DeptAddTaskRequest request, + StreamObserver responseObserver) { + TaskEntity t = new TaskEntity( + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()), + request.getShot(), Convert.coresToCoreUnits(request.getMinCores())); + departmentManager.createTask(t); + Task createdTask = toTask(t); + responseObserver.onNext(DeptAddTaskResponse.newBuilder().setTask(createdTask).build()); + responseObserver.onCompleted(); + } + + @Override + public void addTasks(DeptAddTasksRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); + responseObserver + .onNext(DeptAddTasksResponse.newBuilder().setTasks(builder.build()).build()); + responseObserver.onCompleted(); + } + + @Override + public void clearTasks(DeptClearTasksRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.clearTasks(deptConfig); + responseObserver.onNext(DeptClearTasksResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void clearTaskAdjustments(DeptClearTaskAdjustmentsRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.clearTaskAdjustments(deptConfig); + responseObserver.onNext(DeptClearTaskAdjustmentsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void disableTiManaged(DeptDisableTiManagedRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.disableTiManaged(deptConfig); + responseObserver.onNext(DeptDisableTiManagedResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void enableTiManaged(DeptEnableTiManagedRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.enableTiManaged(deptConfig, request.getTiTask(), + Convert.coresToWholeCoreUnits(request.getManagedCores())); + responseObserver.onNext(DeptEnableTiManagedResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void 
getDepartmentNames(DeptGetDepartmentNamesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(DeptGetDepartmentNamesResponse.newBuilder() + .addAllNames(whiteboard.getDepartmentNames()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getTasks(DeptGetTasksRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + TaskSeq tasks = whiteboard.getTasks(deptConfig, deptConfig); + TaskSeq taskSeq = TaskSeq.newBuilder().addAllTasks(tasks.getTasksList()).build(); + responseObserver.onNext(DeptGetTasksResponse.newBuilder().setTasks(taskSeq).build()); + responseObserver.onCompleted(); + } + + @Override + public void removeDepartmentName(DeptRemoveDepartmentNameRequest request, + StreamObserver responseObserver) { + adminManager.removeDepartment(adminManager.findDepartment(request.getName())); + responseObserver.onNext(DeptRemoveDepartmentNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void replaceTasks(DeptReplaceTaskRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.clearTasks(deptConfig); + TaskSeq.Builder builder = addTasksToDepartment(request.getTmapMap(), deptConfig); + responseObserver + .onNext(DeptReplaceTaskResponse.newBuilder().setTasks(builder.build()).build()); + responseObserver.onCompleted(); + } + + public void setManagedCores(DeptSetManagedCoresRequest request, + StreamObserver responseObserver) { + PointDetail deptConfig = + departmentManager.getDepartmentConfigDetail(request.getDepartment().getId()); + departmentManager.setManagedCores(deptConfig, + Convert.coresToWholeCoreUnits(request.getManagedCores())); + responseObserver.onNext(DeptSetManagedCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private Task toTask(TaskEntity detail) { + return Task.newBuilder().setId(detail.id).setName(detail.name).setShot(detail.shot) + .setDept(detail.deptId).setMinCores(detail.minCoreUnits).setPointId(detail.pointId) + .build(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java index a86c8a049..49eb50f77 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java @@ -36,73 +36,73 @@ public class ManageDepend extends DependInterfaceGrpc.DependInterfaceImplBase { - private static final Logger logger = LogManager.getLogger(ManageDepend.class); - - private DependManager dependManager; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - - @Override - public void getDepend(DependGetDependRequest request, - StreamObserver responseObserver) { - try { - 
responseObserver.onNext(DependGetDependResponse.newBuilder() - .setDepend(whiteboard.getDepend(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } + private static final Logger logger = LogManager.getLogger(ManageDepend.class); - public void satisfy(DependSatisfyRequest request, - StreamObserver responseObserver) { + private DependManager dependManager; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; - LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); - String key = "manage_dep_sat_req_" + request.getDepend().getId(); - manageQueue.execute(new KeyRunnable(key) { - public void run() { + @Override + public void getDepend(DependGetDependRequest request, + StreamObserver responseObserver) { try { - logger.info("dropping dependency: " + depend.id); - dependManager.satisfyDepend(depend); - } catch (Exception e) { - logger.error("error satisfying dependency: " + depend.getId() + " , " + e); + responseObserver.onNext(DependGetDependResponse.newBuilder() + .setDepend(whiteboard.getDepend(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); } - } - }); - responseObserver.onNext(DependSatisfyResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public void unsatisfy(DependUnsatisfyRequest request, - StreamObserver responseObserver) { - LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); - dependManager.unsatisfyDepend(depend); - responseObserver.onNext(DependUnsatisfyResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + } + + public void satisfy(DependSatisfyRequest request, + StreamObserver responseObserver) { + + LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); + String key = "manage_dep_sat_req_" + request.getDepend().getId(); + manageQueue.execute(new KeyRunnable(key) { + public void run() { + try { + logger.info("dropping dependency: " + depend.id); + dependManager.satisfyDepend(depend); + } catch (Exception e) { + logger.error("error satisfying dependency: " + depend.getId() + " , " + e); + } + } + }); + responseObserver.onNext(DependSatisfyResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void unsatisfy(DependUnsatisfyRequest request, + StreamObserver responseObserver) { + LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); + dependManager.unsatisfyDepend(depend); + responseObserver.onNext(DependUnsatisfyResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = 
dependManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java index 0fe0f6606..0502b80ed 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFacility.java @@ -17,67 +17,67 @@ import com.imageworks.spcue.service.Whiteboard; public class ManageFacility extends FacilityInterfaceGrpc.FacilityInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; + private AdminManager adminManager; + private Whiteboard whiteboard; - public ManageFacility() {} + public ManageFacility() {} - // TODO(bcipriano) Add error handling. (Issue #59) + // TODO(bcipriano) Add error handling. (Issue #59) - @Override - public void create(FacilityCreateRequest request, - StreamObserver responseObserver) { - adminManager.createFacility(request.getName()); - FacilityCreateResponse response = FacilityCreateResponse.newBuilder() - .setFacility(whiteboard.getFacility(request.getName())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } + @Override + public void create(FacilityCreateRequest request, + StreamObserver responseObserver) { + adminManager.createFacility(request.getName()); + FacilityCreateResponse response = FacilityCreateResponse.newBuilder() + .setFacility(whiteboard.getFacility(request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - @Override - public void get(FacilityGetRequest request, - StreamObserver responseObserver) { - try { - FacilityGetResponse response = FacilityGetResponse.newBuilder() - .setFacility(whiteboard.getFacility(request.getName())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + @Override + public void get(FacilityGetRequest request, + StreamObserver responseObserver) { + try { + FacilityGetResponse response = FacilityGetResponse.newBuilder() + .setFacility(whiteboard.getFacility(request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } } - } - @Override - public void rename(FacilityRenameRequest request, - StreamObserver responseObserver) { - adminManager.setFacilityName(adminManager.getFacility(request.getFacility().getName()), - request.getNewName()); - responseObserver.onNext(FacilityRenameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void rename(FacilityRenameRequest request, + StreamObserver responseObserver) { + adminManager.setFacilityName(adminManager.getFacility(request.getFacility().getName()), + request.getNewName()); + responseObserver.onNext(FacilityRenameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void delete(FacilityDeleteRequest request, - StreamObserver 
responseObserver) { - adminManager.deleteFacility(adminManager.getFacility(request.getName())); - responseObserver.onNext(FacilityDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(FacilityDeleteRequest request, + StreamObserver responseObserver) { + adminManager.deleteFacility(adminManager.getFacility(request.getName())); + responseObserver.onNext(FacilityDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public AdminManager getAdminManager() { - return adminManager; - } + public AdminManager getAdminManager() { + return adminManager; + } - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } - public Whiteboard getWhiteboard() { - return whiteboard; - } + public Whiteboard getWhiteboard() { + return whiteboard; + } - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java index 4ea6beb38..f94470131 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java @@ -72,230 +72,231 @@ public class ManageFilter extends FilterInterfaceGrpc.FilterInterfaceImplBase { - private Whiteboard whiteboard; - private FilterManager filterManager; - private FilterDao filterDao; - private GroupDao groupDao; - private DispatchQueue manageQueue; - - @Override - public void findFilter(FilterFindFilterRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(FilterFindFilterResponse.newBuilder() - .setFilter(whiteboard.findFilter(request.getShow(), request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + private Whiteboard whiteboard; + private FilterManager filterManager; + private FilterDao filterDao; + private GroupDao groupDao; + private DispatchQueue manageQueue; + + @Override + public void findFilter(FilterFindFilterRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(FilterFindFilterResponse.newBuilder() + .setFilter(whiteboard.findFilter(request.getShow(), request.getName())) + .build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } } - } - - @Override - public void createAction(FilterCreateActionRequest request, - StreamObserver responseObserver) { - ActionEntity actionDetail = - ActionEntity.build(getFilterEntity(request.getFilter()), request.getData()); - filterManager.createAction(actionDetail); - Action action = whiteboard.getAction(actionDetail); - FilterCreateActionResponse response = - FilterCreateActionResponse.newBuilder().setAction(action).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void createMatcher(FilterCreateMatcherRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - MatcherEntity matcherDetail 
= MatcherEntity.build(filter, request.getData()); - matcherDetail.filterId = filter.id; - filterManager.createMatcher(matcherDetail); - Matcher newMatcher = whiteboard.getMatcher(matcherDetail); - FilterCreateMatcherResponse response = - FilterCreateMatcherResponse.newBuilder().setMatcher(newMatcher).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void delete(FilterDeleteRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - String key = "manage_filter_del_req_" + filter.getId(); - manageQueue.execute(new KeyRunnable(key) { - public void run() { - filterManager.deleteFilter(filter); - } - }); - responseObserver.onNext(FilterDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getActions(FilterGetActionsRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - FilterGetActionsResponse response = - FilterGetActionsResponse.newBuilder().setActions(whiteboard.getActions(filter)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getMatchers(FilterGetMatchersRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - FilterGetMatchersResponse response = - FilterGetMatchersResponse.newBuilder().setMatchers(whiteboard.getMatchers(filter)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void lowerOrder(FilterLowerOrderRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.lowerFilterOrder(filter); - responseObserver.onNext(FilterLowerOrderResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void raiseOrder(FilterRaiseOrderRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.raiseFilterOrder(filter); - responseObserver.onNext(FilterRaiseOrderResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void orderFirst(FilterOrderFirstRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.setFilterOrder(filter, 0); - responseObserver.onNext(FilterOrderFirstResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void orderLast(FilterOrderLastRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.setFilterOrder(filter, 9999); - responseObserver.onNext(FilterOrderLastResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void runFilterOnGroup(FilterRunFilterOnGroupRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.runFilterOnGroup(filter, groupDao.getGroup(request.getGroup().getId())); - responseObserver.onNext(FilterRunFilterOnGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setEnabled(FilterSetEnabledRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterDao.updateSetFilterEnabled(filter, request.getEnabled()); - 
responseObserver.onNext(FilterSetEnabledResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setName(FilterSetNameRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterDao.updateSetFilterName(filter, request.getName()); - responseObserver.onNext(FilterSetNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setType(FilterSetTypeRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterDao.updateSetFilterType(filter, request.getType()); - responseObserver.onNext(FilterSetTypeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public void setOrder(FilterSetOrderRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - filterManager.setFilterOrder(filter, (double) request.getOrder()); - responseObserver.onNext(FilterSetOrderResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public void runFilterOnJobs(FilterRunFilterOnJobsRequest request, - StreamObserver responseObserver) { - FilterEntity filter = getFilterEntity(request.getFilter()); - for (Job job : request.getJobs().getJobsList()) { - filterManager.runFilterOnJob(filter, job.getId()); + + @Override + public void createAction(FilterCreateActionRequest request, + StreamObserver responseObserver) { + ActionEntity actionDetail = + ActionEntity.build(getFilterEntity(request.getFilter()), request.getData()); + filterManager.createAction(actionDetail); + Action action = whiteboard.getAction(actionDetail); + FilterCreateActionResponse response = + FilterCreateActionResponse.newBuilder().setAction(action).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void createMatcher(FilterCreateMatcherRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + MatcherEntity matcherDetail = MatcherEntity.build(filter, request.getData()); + matcherDetail.filterId = filter.id; + filterManager.createMatcher(matcherDetail); + Matcher newMatcher = whiteboard.getMatcher(matcherDetail); + FilterCreateMatcherResponse response = + FilterCreateMatcherResponse.newBuilder().setMatcher(newMatcher).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void delete(FilterDeleteRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + String key = "manage_filter_del_req_" + filter.getId(); + manageQueue.execute(new KeyRunnable(key) { + public void run() { + filterManager.deleteFilter(filter); + } + }); + responseObserver.onNext(FilterDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getActions(FilterGetActionsRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + FilterGetActionsResponse response = FilterGetActionsResponse.newBuilder() + .setActions(whiteboard.getActions(filter)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getMatchers(FilterGetMatchersRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + FilterGetMatchersResponse response = FilterGetMatchersResponse.newBuilder() 
+ .setMatchers(whiteboard.getMatchers(filter)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void lowerOrder(FilterLowerOrderRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.lowerFilterOrder(filter); + responseObserver.onNext(FilterLowerOrderResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void raiseOrder(FilterRaiseOrderRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.raiseFilterOrder(filter); + responseObserver.onNext(FilterRaiseOrderResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void orderFirst(FilterOrderFirstRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.setFilterOrder(filter, 0); + responseObserver.onNext(FilterOrderFirstResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void orderLast(FilterOrderLastRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.setFilterOrder(filter, 9999); + responseObserver.onNext(FilterOrderLastResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void runFilterOnGroup(FilterRunFilterOnGroupRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.runFilterOnGroup(filter, groupDao.getGroup(request.getGroup().getId())); + responseObserver.onNext(FilterRunFilterOnGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setEnabled(FilterSetEnabledRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterDao.updateSetFilterEnabled(filter, request.getEnabled()); + responseObserver.onNext(FilterSetEnabledResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setName(FilterSetNameRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterDao.updateSetFilterName(filter, request.getName()); + responseObserver.onNext(FilterSetNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setType(FilterSetTypeRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterDao.updateSetFilterType(filter, request.getType()); + responseObserver.onNext(FilterSetTypeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void setOrder(FilterSetOrderRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + filterManager.setFilterOrder(filter, (double) request.getOrder()); + responseObserver.onNext(FilterSetOrderResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public void runFilterOnJobs(FilterRunFilterOnJobsRequest request, + StreamObserver responseObserver) { + FilterEntity filter = getFilterEntity(request.getFilter()); + for (Job job : request.getJobs().getJobsList()) { + filterManager.runFilterOnJob(filter, job.getId()); + } + responseObserver.onNext(FilterRunFilterOnJobsResponse.newBuilder().build()); + 
responseObserver.onCompleted(); + } + + public FilterDao getFilterDao() { + return filterDao; + } + + public void setFilterDao(FilterDao filterDao) { + this.filterDao = filterDao; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private FilterEntity getFilterEntity(Filter filter) { + return filterManager.getFilter(filter.getId()); + } + + private ActionEntity toActionEntity(Action action) { + ActionEntity entity = new ActionEntity(); + entity.id = action.getId(); + entity.type = action.getType(); + entity.valueType = action.getValueType(); + entity.groupValue = action.getGroupValue(); + entity.stringValue = action.getStringValue(); + entity.intValue = action.getIntegerValue(); + entity.floatValue = action.getFloatValue(); + entity.booleanValue = action.getBooleanValue(); + return entity; } - responseObserver.onNext(FilterRunFilterOnJobsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public FilterDao getFilterDao() { - return filterDao; - } - - public void setFilterDao(FilterDao filterDao) { - this.filterDao = filterDao; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private FilterEntity getFilterEntity(Filter filter) { - return filterManager.getFilter(filter.getId()); - } - - private ActionEntity toActionEntity(Action action) { - ActionEntity entity = new ActionEntity(); - entity.id = action.getId(); - entity.type = action.getType(); - entity.valueType = action.getValueType(); - entity.groupValue = action.getGroupValue(); - entity.stringValue = action.getStringValue(); - entity.intValue = action.getIntegerValue(); - entity.floatValue = action.getFloatValue(); - entity.booleanValue = action.getBooleanValue(); - return entity; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java index d26ef635d..9f7d6847d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java @@ -83,323 +83,327 @@ public class ManageFrame extends FrameInterfaceGrpc.FrameInterfaceImplBase { - private JobManager jobManager; - private DependManager dependManager; - private JobManagerSupport jobManagerSupport; - private FrameDao frameDao; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - private 
LocalBookingSupport localBookingSupport; - private FrameSearchFactory frameSearchFactory; - - @Override - public void findFrame(FrameFindFrameRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(FrameFindFrameResponse.newBuilder() - .setFrame(whiteboard.findFrame(request.getJob(), request.getLayer(), request.getFrame())) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getFrame(FrameGetFrameRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(FrameGetFrameResponse.newBuilder() - .setFrame(whiteboard.getFrame(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getFrames(FrameGetFramesRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(FrameGetFramesResponse.newBuilder() - .setFrames(whiteboard.getFrames(frameSearchFactory - .create(jobManagerSupport.getJobManager().findJob(request.getJob()), request.getR()))) - .build()); - responseObserver.onCompleted(); - } - - @Override - public void eat(FrameEatRequest request, StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute(new DispatchEatFrames(frameSearchFactory.create(frame), - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(FrameEatResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void kill(FrameKillRequest request, StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute(new DispatchKillFrames(frameSearchFactory.create(frame), - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(FrameKillResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void retry(FrameRetryRequest request, - StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute(new DispatchRetryFrames(frameSearchFactory.create(frame), - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(FrameRetryResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void createDependencyOnFrame(FrameCreateDependencyOnFrameRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - FrameOnFrame depend = - new FrameOnFrame(frame, jobManager.getFrameDetail(request.getDependOnFrame().getId())); - dependManager.createDepend(depend); - Depend dependency = whiteboard.getDepend(depend); - responseObserver - .onNext(FrameCreateDependencyOnFrameResponse.newBuilder().setDepend(dependency).build()); - responseObserver.onCompleted(); - } - - @Override - public void createDependencyOnJob(FrameCreateDependencyOnJobRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - FrameOnJob depend = new FrameOnJob(frame, jobManager.getJobDetail(request.getJob().getId())); - dependManager.createDepend(depend); - Depend dependency = whiteboard.getDepend(depend); - responseObserver - 
.onNext(FrameCreateDependencyOnJobResponse.newBuilder().setDepend(dependency).build()); - responseObserver.onCompleted(); - } - - @Override - public void createDependencyOnLayer(FrameCreateDependencyOnLayerRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - FrameOnLayer depend = - new FrameOnLayer(frame, jobManager.getLayerDetail(request.getLayer().getId())); - dependManager.createDepend(depend); - Depend dependency = whiteboard.getDepend(depend); - responseObserver - .onNext(FrameCreateDependencyOnLayerResponse.newBuilder().setDepend(dependency).build()); - responseObserver.onCompleted(); - } - - @Override - public void getWhatDependsOnThis(FrameGetWhatDependsOnThisRequest request, - StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - responseObserver.onNext(FrameGetWhatDependsOnThisResponse.newBuilder() - .setDepends(whiteboard.getWhatDependsOnThis(frame)).build()); - responseObserver.onCompleted(); - } - - @Override - public void getWhatThisDependsOn(FrameGetWhatThisDependsOnRequest request, - StreamObserver responseObserver) { - FrameEntity frame = getFrameEntity(request.getFrame()); - responseObserver.onNext(FrameGetWhatThisDependsOnResponse.newBuilder() - .setDepends(whiteboard.getWhatThisDependsOn(frame)).build()); - responseObserver.onCompleted(); - } - - @Override - public void markAsDepend(FrameMarkAsDependRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - jobManager.markFrameAsDepend(frame); - responseObserver.onNext(FrameMarkAsDependResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void markAsWaiting(FrameMarkAsWaitingRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - jobManager.markFrameAsWaiting(frame); - responseObserver.onNext(FrameMarkAsWaitingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void dropDepends(FrameDropDependsRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - manageQueue.execute(new DispatchDropDepends(frame, request.getTarget(), dependManager)); - responseObserver.onNext(FrameDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addRenderPartition(FrameAddRenderPartitionRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setFrameId(frame.id); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpuMemory(request.getMaxGpuMemory()); - lha.setType(RenderPartitionType.FRAME_PARTITION); - - if (localBookingSupport.bookLocal(frame, request.getHost(), request.getUsername(), lha)) { - RenderPartition partition = whiteboard.getRenderPartition(lha); - - responseObserver.onNext( - FrameAddRenderPartitionResponse.newBuilder().setRenderPartition(partition).build()); - responseObserver.onCompleted(); - } else { - responseObserver.onError(Status.INTERNAL.withDescription("Failed to find suitable frames.") - .augmentDescription("customException()").asRuntimeException()); - } - } - - @Override - public void 
setCheckpointState(FrameSetCheckpointStateRequest request, - StreamObserver responseObserver) { - updateManagers(); - FrameEntity frame = getFrameEntity(request.getFrame()); - jobManager.updateCheckpointState(frame, request.getState()); - responseObserver.onNext(FrameSetCheckpointStateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setFrameStateDisplayOverride(FrameStateDisplayOverrideRequest request, - StreamObserver responseObserver) { - updateManagers(); - Frame frame = request.getFrame(); - FrameStateDisplayOverride override = request.getOverride(); - - FrameStateDisplayOverrideSeq existing_overrides = - frameDao.getFrameStateDisplayOverrides(frame.getId()); - // if override already exists, do nothing - // if override is for a state that already has an override but diff color/text, - // update - // if override is new, add - boolean newOverride = true; - for (FrameStateDisplayOverride eo : existing_overrides.getOverridesList()) { - if (eo.equals(override)) { - newOverride = false; - break; - } else if (eo.getState().equals(override.getState()) - && !(eo.getColor().equals(override.getColor()) - && eo.getText().equals(override.getText()))) { - newOverride = false; - frameDao.updateFrameStateDisplayOverride(frame.getId(), override); - break; - } - } - - if (newOverride) { - frameDao.setFrameStateDisplayOverride(frame.getId(), override); - } - responseObserver.onNext(FrameStateDisplayOverrideResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getFrameStateDisplayOverrides(GetFrameStateDisplayOverridesRequest request, - StreamObserver responseObserver) { - try { - updateManagers(); - Frame frame = request.getFrame(); - responseObserver.onNext(GetFrameStateDisplayOverridesResponse.newBuilder() - .setOverrides(frameDao.getFrameStateDisplayOverrides(frame.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError(Status.INTERNAL - .withDescription("No Frame State display overrides found.").asRuntimeException()); - } - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue dispatchQueue) { - this.manageQueue = dispatchQueue; - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } - - private FrameEntity getFrameEntity(Frame frame) { - return frameDao.getFrameDetail(frame.getId()); - } - - private void updateManagers() { - 
setDependManager(jobManagerSupport.getDependManager()); - setJobManager(jobManagerSupport.getJobManager()); - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } + private JobManager jobManager; + private DependManager dependManager; + private JobManagerSupport jobManagerSupport; + private FrameDao frameDao; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + private LocalBookingSupport localBookingSupport; + private FrameSearchFactory frameSearchFactory; + + @Override + public void findFrame(FrameFindFrameRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(FrameFindFrameResponse.newBuilder().setFrame( + whiteboard.findFrame(request.getJob(), request.getLayer(), request.getFrame())) + .build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getFrame(FrameGetFrameRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(FrameGetFrameResponse.newBuilder() + .setFrame(whiteboard.getFrame(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getFrames(FrameGetFramesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(FrameGetFramesResponse.newBuilder() + .setFrames(whiteboard.getFrames(frameSearchFactory.create( + jobManagerSupport.getJobManager().findJob(request.getJob()), + request.getR()))) + .build()); + responseObserver.onCompleted(); + } + + @Override + public void eat(FrameEatRequest request, StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchEatFrames(frameSearchFactory.create(frame), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(FrameEatResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void kill(FrameKillRequest request, StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchKillFrames(frameSearchFactory.create(frame), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(FrameKillResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void retry(FrameRetryRequest request, + StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchRetryFrames(frameSearchFactory.create(frame), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(FrameRetryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void createDependencyOnFrame(FrameCreateDependencyOnFrameRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + FrameOnFrame depend = new FrameOnFrame(frame, + jobManager.getFrameDetail(request.getDependOnFrame().getId())); + dependManager.createDepend(depend); + Depend dependency = whiteboard.getDepend(depend); + responseObserver.onNext( + 
FrameCreateDependencyOnFrameResponse.newBuilder().setDepend(dependency).build()); + responseObserver.onCompleted(); + } + + @Override + public void createDependencyOnJob(FrameCreateDependencyOnJobRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + FrameOnJob depend = + new FrameOnJob(frame, jobManager.getJobDetail(request.getJob().getId())); + dependManager.createDepend(depend); + Depend dependency = whiteboard.getDepend(depend); + responseObserver.onNext( + FrameCreateDependencyOnJobResponse.newBuilder().setDepend(dependency).build()); + responseObserver.onCompleted(); + } + + @Override + public void createDependencyOnLayer(FrameCreateDependencyOnLayerRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + FrameOnLayer depend = + new FrameOnLayer(frame, jobManager.getLayerDetail(request.getLayer().getId())); + dependManager.createDepend(depend); + Depend dependency = whiteboard.getDepend(depend); + responseObserver.onNext( + FrameCreateDependencyOnLayerResponse.newBuilder().setDepend(dependency).build()); + responseObserver.onCompleted(); + } + + @Override + public void getWhatDependsOnThis(FrameGetWhatDependsOnThisRequest request, + StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + responseObserver.onNext(FrameGetWhatDependsOnThisResponse.newBuilder() + .setDepends(whiteboard.getWhatDependsOnThis(frame)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getWhatThisDependsOn(FrameGetWhatThisDependsOnRequest request, + StreamObserver responseObserver) { + FrameEntity frame = getFrameEntity(request.getFrame()); + responseObserver.onNext(FrameGetWhatThisDependsOnResponse.newBuilder() + .setDepends(whiteboard.getWhatThisDependsOn(frame)).build()); + responseObserver.onCompleted(); + } + + @Override + public void markAsDepend(FrameMarkAsDependRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + jobManager.markFrameAsDepend(frame); + responseObserver.onNext(FrameMarkAsDependResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void markAsWaiting(FrameMarkAsWaitingRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + jobManager.markFrameAsWaiting(frame); + responseObserver.onNext(FrameMarkAsWaitingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void dropDepends(FrameDropDependsRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + manageQueue.execute(new DispatchDropDepends(frame, request.getTarget(), dependManager)); + responseObserver.onNext(FrameDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addRenderPartition(FrameAddRenderPartitionRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setFrameId(frame.id); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.FRAME_PARTITION); + + if 
(localBookingSupport.bookLocal(frame, request.getHost(), request.getUsername(), lha)) { + RenderPartition partition = whiteboard.getRenderPartition(lha); + + responseObserver.onNext(FrameAddRenderPartitionResponse.newBuilder() + .setRenderPartition(partition).build()); + responseObserver.onCompleted(); + } else { + responseObserver + .onError(Status.INTERNAL.withDescription("Failed to find suitable frames.") + .augmentDescription("customException()").asRuntimeException()); + } + } + + @Override + public void setCheckpointState(FrameSetCheckpointStateRequest request, + StreamObserver responseObserver) { + updateManagers(); + FrameEntity frame = getFrameEntity(request.getFrame()); + jobManager.updateCheckpointState(frame, request.getState()); + responseObserver.onNext(FrameSetCheckpointStateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setFrameStateDisplayOverride(FrameStateDisplayOverrideRequest request, + StreamObserver responseObserver) { + updateManagers(); + Frame frame = request.getFrame(); + FrameStateDisplayOverride override = request.getOverride(); + + FrameStateDisplayOverrideSeq existing_overrides = + frameDao.getFrameStateDisplayOverrides(frame.getId()); + // if override already exists, do nothing + // if override is for a state that already has an override but diff color/text, + // update + // if override is new, add + boolean newOverride = true; + for (FrameStateDisplayOverride eo : existing_overrides.getOverridesList()) { + if (eo.equals(override)) { + newOverride = false; + break; + } else if (eo.getState().equals(override.getState()) + && !(eo.getColor().equals(override.getColor()) + && eo.getText().equals(override.getText()))) { + newOverride = false; + frameDao.updateFrameStateDisplayOverride(frame.getId(), override); + break; + } + } + + if (newOverride) { + frameDao.setFrameStateDisplayOverride(frame.getId(), override); + } + responseObserver.onNext(FrameStateDisplayOverrideResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getFrameStateDisplayOverrides(GetFrameStateDisplayOverridesRequest request, + StreamObserver responseObserver) { + try { + updateManagers(); + Frame frame = request.getFrame(); + responseObserver.onNext(GetFrameStateDisplayOverridesResponse.newBuilder() + .setOverrides(frameDao.getFrameStateDisplayOverrides(frame.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError( + Status.INTERNAL.withDescription("No Frame State display overrides found.") + .asRuntimeException()); + } + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue dispatchQueue) { + this.manageQueue = dispatchQueue; + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void 
setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } + + private FrameEntity getFrameEntity(Frame frame) { + return frameDao.getFrameDetail(frame.getId()); + } + + private void updateManagers() { + setDependManager(jobManagerSupport.getDependManager()); + setJobManager(jobManagerSupport.getJobManager()); + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java index e9ed05324..baad91fb2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java @@ -80,274 +80,275 @@ public class ManageGroup extends GroupInterfaceGrpc.GroupInterfaceImplBase { - private GroupDao groupDao; - private JobDao jobDao; - private GroupManager groupManager; - private AdminManager adminManager; - private Whiteboard whiteboard; - private DispatchQueue manageQueue; - - @Override - public void getGroup(GroupGetGroupRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(GroupGetGroupResponse.newBuilder() - .setGroup(whiteboard.getGroup(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void findGroup(GroupFindGroupRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(GroupFindGroupResponse.newBuilder() - .setGroup(whiteboard.findGroup(request.getShow(), request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void reparentGroups(GroupReparentGroupsRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupSeq groupSeq = request.getGroups(); - List groupIds = new ArrayList(groupSeq.getGroupsCount()); - for (Group g : groupSeq.getGroupsList()) { - groupIds.add(g.getId()); - } - groupManager.reparentGroupIds(group, groupIds); - responseObserver.onNext(GroupReparentGroupsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void reparentJobs(GroupReparentJobsRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - final GroupDetail gDetail = groupDao.getGroupDetail(group.getId()); - for (Job job : request.getJobs().getJobsList()) { - groupManager.reparentJob(jobDao.getJob(job.getId()), gDetail, new Inherit[] {Inherit.All}); - } - responseObserver.onNext(GroupReparentJobsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void createSubGroup(GroupCreateSubGroupRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); 
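Note on the pattern above: every read-style RPC in these servants wraps the whiteboard lookup in the same try/catch, translating Spring's EmptyResultDataAccessException into a gRPC NOT_FOUND status. The standalone Java sketch below is not part of the patch; it only restates that idiom with a hypothetical helper (ServantReplies.replyOrNotFound) so the shape is easy to see. Only io.grpc.Status, io.grpc.stub.StreamObserver, and Spring's EmptyResultDataAccessException are real types here.

import java.util.function.Supplier;
import io.grpc.Status;
import io.grpc.stub.StreamObserver;
import org.springframework.dao.EmptyResultDataAccessException;

// Hypothetical helper, for illustration only.
final class ServantReplies {
    private ServantReplies() {}

    // Build the response inside the supplier; a missing row surfaces as
    // EmptyResultDataAccessException and is mapped to NOT_FOUND, mirroring
    // the catch blocks in findFrame/getFrame/getGroup/findGroup above.
    static <T> void replyOrNotFound(Supplier<T> lookup, StreamObserver<T> observer) {
        try {
            observer.onNext(lookup.get());
            observer.onCompleted();
        } catch (EmptyResultDataAccessException e) {
            observer.onError(Status.NOT_FOUND
                    .withDescription(e.getMessage())
                    .withCause(e)
                    .asRuntimeException());
        }
    }
}

A call such as replyOrNotFound(() -> FrameGetFrameResponse.newBuilder().setFrame(whiteboard.getFrame(request.getId())).build(), responseObserver) would be roughly equivalent to the expanded handlers above.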
- GroupDetail newGroup = new GroupDetail(); - newGroup.name = request.getName(); - newGroup.parentId = group.getId(); - newGroup.showId = group.getShowId(); - groupManager.createGroup(newGroup, group); - Group subgroup = whiteboard.getGroup(newGroup.id); - responseObserver.onNext(GroupCreateSubGroupResponse.newBuilder().setGroup(subgroup).build()); - responseObserver.onCompleted(); - } - - @Override - public void delete(GroupDeleteRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - try { - groupManager.deleteGroup(group); - } catch (Exception e) { - responseObserver - .onError(Status.INTERNAL - .withDescription("Failed to remove group, be sure that there are no " - + "jobs or filter actions pointing at the group.") - .withCause(e).asRuntimeException()); - } - responseObserver.onNext(GroupDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMaxCores(GroupSetDefJobMaxCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMaxCores(group, - Convert.coresToWholeCoreUnits(request.getMaxCores())); - responseObserver.onNext(GroupSetDefJobMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMinCores(GroupSetDefJobMinCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMinCores(group, - Convert.coresToWholeCoreUnits(request.getMinCores())); - responseObserver.onNext(GroupSetDefJobMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMaxGpus(GroupSetDefJobMaxGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMaxGpus(group, request.getMaxGpus()); - responseObserver.onNext(GroupSetDefJobMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobMinGpus(GroupSetDefJobMinGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobMinGpus(group, request.getMinGpus()); - responseObserver.onNext(GroupSetDefJobMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setName(GroupSetNameRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupDao.updateName(group, request.getName()); - responseObserver.onNext(GroupSetNameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setGroup(GroupSetGroupRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupInterface parentGroup = groupDao.getGroup(request.getParentGroup().getId()); - groupManager.setGroupParent(group, groupDao.getGroupDetail(parentGroup.getGroupId())); - responseObserver.onNext(GroupSetGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDepartment(GroupSetDeptRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDepartment(group, 
adminManager.findDepartment(request.getDept())); - responseObserver.onNext(GroupSetDeptResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultJobPriority(GroupSetDefJobPriorityRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupDefaultJobPriority(group, request.getPriority()); - responseObserver.onNext(GroupSetDefJobPriorityResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getGroups(GroupGetGroupsRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - GroupSeq groupSeq = whiteboard.getGroups(group); - responseObserver.onNext(GroupGetGroupsResponse.newBuilder().setGroups(groupSeq).build()); - responseObserver.onCompleted(); - } - - @Override - public void getJobs(GroupGetJobsRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - JobSeq jobSeq = whiteboard.getJobs(group); - responseObserver.onNext(GroupGetJobsResponse.newBuilder().setJobs(jobSeq).build()); - responseObserver.onCompleted(); - } - - @Override - public void setMaxCores(GroupSetMaxCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMaxCores(group, Convert.coresToWholeCoreUnits(request.getMaxCores())); - responseObserver.onNext(GroupSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMinCores(GroupSetMinCoresRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMinCores(group, Convert.coresToWholeCoreUnits(request.getMinCores())); - responseObserver.onNext(GroupSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMaxGpus(GroupSetMaxGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMaxGpus(group, request.getMaxGpus()); - responseObserver.onNext(GroupSetMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setMinGpus(GroupSetMinGpusRequest request, - StreamObserver responseObserver) { - GroupInterface group = getGroupInterface(request.getGroup()); - groupManager.setGroupMinGpus(group, request.getMinGpus()); - responseObserver.onNext(GroupSetMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public JobDao getJobDao() { - return jobDao; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public void 
setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - private GroupInterface getGroupInterface(Group group) { - return groupDao.getGroup(group.getId()); - } + private GroupDao groupDao; + private JobDao jobDao; + private GroupManager groupManager; + private AdminManager adminManager; + private Whiteboard whiteboard; + private DispatchQueue manageQueue; + + @Override + public void getGroup(GroupGetGroupRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(GroupGetGroupResponse.newBuilder() + .setGroup(whiteboard.getGroup(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void findGroup(GroupFindGroupRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(GroupFindGroupResponse.newBuilder() + .setGroup(whiteboard.findGroup(request.getShow(), request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void reparentGroups(GroupReparentGroupsRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupSeq groupSeq = request.getGroups(); + List groupIds = new ArrayList(groupSeq.getGroupsCount()); + for (Group g : groupSeq.getGroupsList()) { + groupIds.add(g.getId()); + } + groupManager.reparentGroupIds(group, groupIds); + responseObserver.onNext(GroupReparentGroupsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void reparentJobs(GroupReparentJobsRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + final GroupDetail gDetail = groupDao.getGroupDetail(group.getId()); + for (Job job : request.getJobs().getJobsList()) { + groupManager.reparentJob(jobDao.getJob(job.getId()), gDetail, + new Inherit[] {Inherit.All}); + } + responseObserver.onNext(GroupReparentJobsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void createSubGroup(GroupCreateSubGroupRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupDetail newGroup = new GroupDetail(); + newGroup.name = request.getName(); + newGroup.parentId = group.getId(); + newGroup.showId = group.getShowId(); + groupManager.createGroup(newGroup, group); + Group subgroup = whiteboard.getGroup(newGroup.id); + responseObserver + .onNext(GroupCreateSubGroupResponse.newBuilder().setGroup(subgroup).build()); + responseObserver.onCompleted(); + } + + @Override + public void delete(GroupDeleteRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + try { + groupManager.deleteGroup(group); + } catch (Exception e) { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to remove group, be sure that there are no " + + "jobs or filter actions pointing at the group.") + .withCause(e).asRuntimeException()); + } + responseObserver.onNext(GroupDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultJobMaxCores(GroupSetDefJobMaxCoresRequest request, + StreamObserver responseObserver) { + 
GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMaxCores(group, + Convert.coresToWholeCoreUnits(request.getMaxCores())); + responseObserver.onNext(GroupSetDefJobMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultJobMinCores(GroupSetDefJobMinCoresRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMinCores(group, + Convert.coresToWholeCoreUnits(request.getMinCores())); + responseObserver.onNext(GroupSetDefJobMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultJobMaxGpus(GroupSetDefJobMaxGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMaxGpus(group, request.getMaxGpus()); + responseObserver.onNext(GroupSetDefJobMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultJobMinGpus(GroupSetDefJobMinGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMinGpus(group, request.getMinGpus()); + responseObserver.onNext(GroupSetDefJobMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setName(GroupSetNameRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupDao.updateName(group, request.getName()); + responseObserver.onNext(GroupSetNameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setGroup(GroupSetGroupRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupInterface parentGroup = groupDao.getGroup(request.getParentGroup().getId()); + groupManager.setGroupParent(group, groupDao.getGroupDetail(parentGroup.getGroupId())); + responseObserver.onNext(GroupSetGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDepartment(GroupSetDeptRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDepartment(group, adminManager.findDepartment(request.getDept())); + responseObserver.onNext(GroupSetDeptResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultJobPriority(GroupSetDefJobPriorityRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobPriority(group, request.getPriority()); + responseObserver.onNext(GroupSetDefJobPriorityResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getGroups(GroupGetGroupsRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + GroupSeq groupSeq = whiteboard.getGroups(group); + responseObserver.onNext(GroupGetGroupsResponse.newBuilder().setGroups(groupSeq).build()); + responseObserver.onCompleted(); + } + + @Override + public void getJobs(GroupGetJobsRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + JobSeq jobSeq = whiteboard.getJobs(group); + 
responseObserver.onNext(GroupGetJobsResponse.newBuilder().setJobs(jobSeq).build()); + responseObserver.onCompleted(); + } + + @Override + public void setMaxCores(GroupSetMaxCoresRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMaxCores(group, Convert.coresToWholeCoreUnits(request.getMaxCores())); + responseObserver.onNext(GroupSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMinCores(GroupSetMinCoresRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMinCores(group, Convert.coresToWholeCoreUnits(request.getMinCores())); + responseObserver.onNext(GroupSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMaxGpus(GroupSetMaxGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMaxGpus(group, request.getMaxGpus()); + responseObserver.onNext(GroupSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMinGpus(GroupSetMinGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMinGpus(group, request.getMinGpus()); + responseObserver.onNext(GroupSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public JobDao getJobDao() { + return jobDao; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + private GroupInterface getGroupInterface(Group group) { + return groupDao.getGroup(group.getId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java index 38ed6e1a1..5732af62d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java @@ -89,302 +89,305 @@ public class ManageHost extends HostInterfaceGrpc.HostInterfaceImplBase { - private HostManager hostManager; - private HostDao hostDao; - private AdminManager adminManager; - private CommentManager commentManager; - private RedirectManager redirectManager; - private JobManager jobManager; - private Whiteboard whiteboard; - private HostSearchFactory hostSearchFactory; - - @Override - public void getHosts(HostGetHostsRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(HostGetHostsResponse.newBuilder() - 
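Note on Convert.coresToWholeCoreUnits: the group and job handlers pass user-facing core counts through this project helper, and ManageFrame's addRenderPartition does the equivalent inline with request.getMaxCores() * 100, so one whole core corresponds to 100 internal core units. The snippet below is an illustrative stand-in for that convention, not the project's Convert class; the constant name and the rounding choice are assumptions.

// Illustrative only; the real conversion lives in the project's Convert helper.
final class CoreUnitsSketch {
    static final int UNITS_PER_CORE = 100; // implied by getMaxCores() * 100 above

    private CoreUnitsSketch() {}

    static int coresToWholeCoreUnits(float cores) {
        // e.g. 2.5 cores -> 250 core units
        return Math.round(cores * UNITS_PER_CORE);
    }
}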
.setHosts(whiteboard.getHosts(hostSearchFactory.create(request.getR()))).build()); - responseObserver.onCompleted(); - } - - @Override - public void getHostWhiteboard(HostGetHostWhiteboardRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(HostGetHostWhiteboardResponse.newBuilder() - .setNestedHosts(whiteboard.getHostWhiteboard()).build()); - responseObserver.onCompleted(); - } - - @Override - public void findHost(HostFindHostRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(HostFindHostResponse.newBuilder() - .setHost(whiteboard.findHost(request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getHost(HostGetHostRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext( - HostGetHostResponse.newBuilder().setHost(whiteboard.getHost(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void lock(HostLockRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.setHostLock(host, LockState.LOCKED, new Source("HostApi")); - responseObserver.onNext(HostLockResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void unlock(HostUnlockRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.setHostLock(host, LockState.OPEN, new Source("HostApi")); - responseObserver.onNext(HostUnlockResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void rebootWhenIdle(HostRebootWhenIdleRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.rebootWhenIdle(host); - responseObserver.onNext(HostRebootWhenIdleResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void delete(HostDeleteRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.deleteHost(host); - responseObserver.onNext(HostDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void reboot(HostRebootRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.rebootNow(host); - responseObserver.onNext(HostRebootResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setAllocation(HostSetAllocationRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.setAllocation(host, adminManager.getAllocationDetail(request.getAllocationId())); - responseObserver.onNext(HostSetAllocationResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addTags(HostAddTagsRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.addTags(host, request.getTagsList().toArray(new String[0])); - responseObserver.onNext(HostAddTagsResponse.newBuilder().build()); - 
responseObserver.onCompleted(); - } - - @Override - public void removeTags(HostRemoveTagsRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.removeTags(host, request.getTagsList().toArray(new String[0])); - responseObserver.onNext(HostRemoveTagsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void renameTag(HostRenameTagRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostManager.renameTag(host, request.getOldTag(), request.getNewTag()); - responseObserver.onNext(HostRenameTagResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void addComment(HostAddCommentRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - CommentDetail c = new CommentDetail(); - Comment newComment = request.getNewComment(); - c.message = newComment.getMessage(); - c.subject = newComment.getSubject(); - c.user = newComment.getUser(); - c.timestamp = null; - commentManager.addComment(host, c); - responseObserver.onNext(HostAddCommentResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getComments(HostGetCommentsRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - CommentSeq commentSeq = whiteboard.getComments(host); - responseObserver.onNext(HostGetCommentsResponse.newBuilder().setComments(commentSeq).build()); - responseObserver.onCompleted(); - } - - @Override - public void getProcs(HostGetProcsRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - ProcSeq procs = whiteboard.getProcs(host); - responseObserver.onNext(HostGetProcsResponse.newBuilder().setProcs(procs).build()); - responseObserver.onCompleted(); - } - - @Override - public void setThreadMode(HostSetThreadModeRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostDao.updateThreadMode(host, request.getMode()); - responseObserver.onNext(HostSetThreadModeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setHardwareState(HostSetHardwareStateRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostDao.updateHostState(host, request.getState()); - responseObserver.onNext(HostSetHardwareStateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getOwner(HostGetOwnerRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - responseObserver - .onNext(HostGetOwnerResponse.newBuilder().setOwner(whiteboard.getOwner(host)).build()); - responseObserver.onCompleted(); - } - - @Override - public void getDeed(HostGetDeedRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - responseObserver - .onNext(HostGetDeedResponse.newBuilder().setDeed(whiteboard.getDeed(host)).build()); - responseObserver.onCompleted(); - } - - @Override - public void getRenderPartitions(HostGetRenderPartitionsRequest request, - StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - responseObserver.onNext(HostGetRenderPartitionsResponse.newBuilder() - 
.setRenderPartitions(whiteboard.getRenderPartitions(host)).build()); - responseObserver.onCompleted(); - } - - @Override - public void redirectToJob(HostRedirectToJobRequest request, - StreamObserver responseObserver) { - - List virtualProcs = new ArrayList<>(); - for (String procName : request.getProcNamesList()) { - virtualProcs.add(hostManager.getVirtualProc(procName)); - } - boolean value = redirectManager.addRedirect(virtualProcs, jobManager.getJob(request.getJobId()), - new Source(request.toString())); - responseObserver.onNext(HostRedirectToJobResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - @Override - public void setOs(HostSetOsRequest request, StreamObserver responseObserver) { - HostInterface host = getHostInterface(request.getHost()); - hostDao.updateHostOs(host, request.getOs()); - responseObserver.onNext(HostSetOsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public CommentManager getCommentManager() { - return commentManager; - } - - public void setCommentManager(CommentManager commentManager) { - this.commentManager = commentManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public HostSearchFactory getHostSearchFactory() { - return hostSearchFactory; - } - - public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { - this.hostSearchFactory = hostSearchFactory; - } - - private HostInterface getHostInterface(Host host) { - return hostManager.getHost(host.getId()); - } + private HostManager hostManager; + private HostDao hostDao; + private AdminManager adminManager; + private CommentManager commentManager; + private RedirectManager redirectManager; + private JobManager jobManager; + private Whiteboard whiteboard; + private HostSearchFactory hostSearchFactory; + + @Override + public void getHosts(HostGetHostsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(HostGetHostsResponse.newBuilder() + .setHosts(whiteboard.getHosts(hostSearchFactory.create(request.getR()))).build()); + responseObserver.onCompleted(); + } + + @Override + public void getHostWhiteboard(HostGetHostWhiteboardRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(HostGetHostWhiteboardResponse.newBuilder() + .setNestedHosts(whiteboard.getHostWhiteboard()).build()); + responseObserver.onCompleted(); + } + + @Override + public void findHost(HostFindHostRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(HostFindHostResponse.newBuilder() + .setHost(whiteboard.findHost(request.getName())).build()); + 
responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getHost(HostGetHostRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(HostGetHostResponse.newBuilder() + .setHost(whiteboard.getHost(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void lock(HostLockRequest request, StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.setHostLock(host, LockState.LOCKED, new Source("HostApi")); + responseObserver.onNext(HostLockResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void unlock(HostUnlockRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.setHostLock(host, LockState.OPEN, new Source("HostApi")); + responseObserver.onNext(HostUnlockResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void rebootWhenIdle(HostRebootWhenIdleRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.rebootWhenIdle(host); + responseObserver.onNext(HostRebootWhenIdleResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void delete(HostDeleteRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.deleteHost(host); + responseObserver.onNext(HostDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void reboot(HostRebootRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.rebootNow(host); + responseObserver.onNext(HostRebootResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setAllocation(HostSetAllocationRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.setAllocation(host, + adminManager.getAllocationDetail(request.getAllocationId())); + responseObserver.onNext(HostSetAllocationResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addTags(HostAddTagsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.addTags(host, request.getTagsList().toArray(new String[0])); + responseObserver.onNext(HostAddTagsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void removeTags(HostRemoveTagsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.removeTags(host, request.getTagsList().toArray(new String[0])); + responseObserver.onNext(HostRemoveTagsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void renameTag(HostRenameTagRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostManager.renameTag(host, request.getOldTag(), request.getNewTag()); + 
responseObserver.onNext(HostRenameTagResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void addComment(HostAddCommentRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + CommentDetail c = new CommentDetail(); + Comment newComment = request.getNewComment(); + c.message = newComment.getMessage(); + c.subject = newComment.getSubject(); + c.user = newComment.getUser(); + c.timestamp = null; + commentManager.addComment(host, c); + responseObserver.onNext(HostAddCommentResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getComments(HostGetCommentsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + CommentSeq commentSeq = whiteboard.getComments(host); + responseObserver + .onNext(HostGetCommentsResponse.newBuilder().setComments(commentSeq).build()); + responseObserver.onCompleted(); + } + + @Override + public void getProcs(HostGetProcsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + ProcSeq procs = whiteboard.getProcs(host); + responseObserver.onNext(HostGetProcsResponse.newBuilder().setProcs(procs).build()); + responseObserver.onCompleted(); + } + + @Override + public void setThreadMode(HostSetThreadModeRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostDao.updateThreadMode(host, request.getMode()); + responseObserver.onNext(HostSetThreadModeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setHardwareState(HostSetHardwareStateRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostDao.updateHostState(host, request.getState()); + responseObserver.onNext(HostSetHardwareStateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getOwner(HostGetOwnerRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + responseObserver.onNext( + HostGetOwnerResponse.newBuilder().setOwner(whiteboard.getOwner(host)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getDeed(HostGetDeedRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + responseObserver + .onNext(HostGetDeedResponse.newBuilder().setDeed(whiteboard.getDeed(host)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getRenderPartitions(HostGetRenderPartitionsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + responseObserver.onNext(HostGetRenderPartitionsResponse.newBuilder() + .setRenderPartitions(whiteboard.getRenderPartitions(host)).build()); + responseObserver.onCompleted(); + } + + @Override + public void redirectToJob(HostRedirectToJobRequest request, + StreamObserver responseObserver) { + + List virtualProcs = new ArrayList<>(); + for (String procName : request.getProcNamesList()) { + virtualProcs.add(hostManager.getVirtualProc(procName)); + } + boolean value = redirectManager.addRedirect(virtualProcs, + jobManager.getJob(request.getJobId()), new Source(request.toString())); + responseObserver.onNext(HostRedirectToJobResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + 
@Override + public void setOs(HostSetOsRequest request, + StreamObserver responseObserver) { + HostInterface host = getHostInterface(request.getHost()); + hostDao.updateHostOs(host, request.getOs()); + responseObserver.onNext(HostSetOsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public CommentManager getCommentManager() { + return commentManager; + } + + public void setCommentManager(CommentManager commentManager) { + this.commentManager = commentManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public HostSearchFactory getHostSearchFactory() { + return hostSearchFactory; + } + + public void setHostSearchFactory(HostSearchFactory hostSearchFactory) { + this.hostSearchFactory = hostSearchFactory; + } + + private HostInterface getHostInterface(Host host) { + return hostManager.getHost(host.getId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java index 98432bdd4..82dacfc37 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java @@ -162,809 +162,824 @@ import static com.imageworks.spcue.servant.ServantUtil.attemptChange; public class ManageJob extends JobInterfaceGrpc.JobInterfaceImplBase { - private static final Logger logger = LogManager.getLogger(ManageJob.class); - private Whiteboard whiteboard; - private JobManager jobManager; - private GroupManager groupManager; - private JobManagerSupport jobManagerSupport; - private JobDao jobDao; - private JobLauncher jobLauncher; - private DependManager dependManager; - private CommentManager commentManager; - private DispatchQueue manageQueue; - private Dispatcher localDispatcher; - private LocalBookingSupport localBookingSupport; - private FilterManager filterManager; - private JobInterface job; - private FrameSearchFactory frameSearchFactory; - private JobSearchFactory jobSearchFactory; - private final String property = "frame.finished_jobs_readonly"; - @Autowired - private Environment env; - - @Override - public void findJob(JobFindJobRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext( - JobFindJobResponse.newBuilder().setJob(whiteboard.findJob(request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getJob(JobGetJobRequest request, StreamObserver 
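Note on the mutation-style RPCs: eat, kill, and retry in ManageFrame, and kill in ManageJob below, do not perform the work inline. They enqueue a Dispatch* command on manageQueue and acknowledge the RPC immediately, so a large frame selection cannot stall the gRPC thread. The sketch below shows only the shape of that fire-and-forget flow; the ExecutorService stands in for the project's DispatchQueue and is an assumption, not its actual implementation.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import io.grpc.stub.StreamObserver;

// Illustrative only: acknowledge first, do the heavy work on a queue thread.
final class AsyncAckSketch {
    private final ExecutorService manageQueue = Executors.newSingleThreadExecutor();

    <R> void enqueueAndAck(Runnable work, R emptyResponse, StreamObserver<R> observer) {
        manageQueue.execute(work);      // e.g. new DispatchKillFrames(...)
        observer.onNext(emptyResponse); // e.g. FrameKillResponse.newBuilder().build()
        observer.onCompleted();         // the client returns before frames are touched
    }
}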
responseObserver) { - try { - responseObserver.onNext( - JobGetJobResponse.newBuilder().setJob(whiteboard.getJob(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getJobs(JobGetJobsRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(JobGetJobsResponse.newBuilder() - .setJobs(whiteboard.getJobs(jobSearchFactory.create(request.getR()))).build()); - responseObserver.onCompleted(); - } - - @Override - public void getJobNames(JobGetJobNamesRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(JobGetJobNamesResponse.newBuilder() - .addAllNames(whiteboard.getJobNames(jobSearchFactory.create(request.getR()))).build()); - responseObserver.onCompleted(); - } - - @Override - public void isJobPending(JobIsJobPendingRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(JobIsJobPendingResponse.newBuilder() - .setValue(whiteboard.isJobPending(request.getName())).build()); - responseObserver.onCompleted(); - } - - @Override - public void getFrames(JobGetFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - FrameSeq frameSeq = whiteboard.getFrames(frameSearchFactory.create(job, request.getReq())); - responseObserver.onNext(JobGetFramesResponse.newBuilder().setFrames(frameSeq).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getLayers(JobGetLayersRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - LayerSeq layerSeq = whiteboard.getLayers(job); - responseObserver.onNext(JobGetLayersResponse.newBuilder().setLayers(layerSeq).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void kill(JobKillRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - manageQueue.execute(new DispatchJobComplete(job, new Source(request.toString(), - request.getUsername(), request.getPid(), request.getHostKill(), request.getReason()), - true, jobManagerSupport)); - responseObserver.onNext(JobKillResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void launchSpecAndWait(JobLaunchSpecAndWaitRequest request, - StreamObserver responseObserver) { - try { - JobSpec spec = jobLauncher.parse(request.getSpec()); - jobLauncher.launch(spec); - JobSeq.Builder jobSeqBuilder = JobSeq.newBuilder(); - for (BuildableJob j : spec.getJobs()) { - jobSeqBuilder.addJobs(whiteboard.findJob(j.detail.name)); - } - responseObserver - .onNext(JobLaunchSpecAndWaitResponse.newBuilder().setJobs(jobSeqBuilder.build()).build()); - responseObserver.onCompleted(); - } catch (Exception e) { - logger.error("Failed to launch and add job.", e); - responseObserver.onError( - Status.INTERNAL.withDescription("Failed to launch and add job: " + e.getMessage()) - 
.withCause(e).asRuntimeException()); - } - } - - @Override - public void launchSpec(JobLaunchSpecRequest request, - StreamObserver responseObserver) { - try { - JobSpec spec = jobLauncher.parse(request.getSpec()); - List result = new ArrayList(8); - for (BuildableJob j : spec.getJobs()) { - result.add(j.detail.name); - } - jobLauncher.queueAndLaunch(spec); - responseObserver.onNext(JobLaunchSpecResponse.newBuilder().addAllNames(result).build()); - responseObserver.onCompleted(); - } catch (Exception e) { - logger.error("Failed to add job to launch queue.", e); - responseObserver.onError( - Status.INTERNAL.withDescription("Failed to add job to launch queue: " + e.getMessage()) - .withCause(e).asRuntimeException()); - } - } - - @Override - public void pause(JobPauseRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - jobManager.setJobPaused(job, true); - responseObserver.onNext(JobPauseResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void resume(JobResumeRequest request, StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - jobManager.setJobPaused(job, false); - responseObserver.onNext(JobResumeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setMaxCores(JobSetMaxCoresRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateMaxCores(job, Convert.coresToWholeCoreUnits(request.getVal())); - responseObserver.onNext(JobSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setMinCores(JobSetMinCoresRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - jobDao.updateMinCores(job, Convert.coresToWholeCoreUnits(request.getVal())); - responseObserver.onNext(JobSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setMaxGpus(JobSetMaxGpusRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - jobDao.updateMaxGpus(job, request.getVal()); - responseObserver.onNext(JobSetMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setMinGpus(JobSetMinGpusRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - jobDao.updateMinGpus(job, request.getVal()); - responseObserver.onNext(JobSetMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - 
.onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setPriority(JobSetPriorityRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updatePriority(job, request.getVal()); - responseObserver.onNext(JobSetPriorityResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getCurrent(JobGetCurrentRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - Job currentJob = whiteboard.getJob(job.getId()); - responseObserver.onNext(JobGetCurrentResponse.newBuilder().setJob(currentJob).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void eatFrames(JobEatFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchEatFrames(frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(JobEatFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void killFrames(JobKillFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchKillFrames(frameSearchFactory.create(job, request.getReq()), - new Source(request.toString(), request.getUsername(), request.getPid(), - request.getHostKill(), request.getReason()), - jobManagerSupport)); - responseObserver.onNext(JobKillFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void markDoneFrames(JobMarkDoneFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchSatisfyDepends( - frameSearchFactory.create(job, request.getReq()), jobManagerSupport)); - responseObserver.onNext(JobMarkDoneFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void retryFrames(JobRetryFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue - .execute(new DispatchRetryFrames(frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), jobManagerSupport)); - 
responseObserver.onNext(JobRetryFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setAutoEat(JobSetAutoEatRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateAutoEat(job, request.getValue()); - responseObserver.onNext(JobSetAutoEatResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void createDependencyOnFrame(JobCreateDependencyOnFrameRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobOnFrame depend = - new JobOnFrame(job, jobManager.getFrameDetail(request.getFrame().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnFrameResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void createDependencyOnJob(JobCreateDependencyOnJobRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobOnJob depend = new JobOnJob(job, jobManager.getJobDetail(request.getOnJob().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnJobResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void createDependencyOnLayer(JobCreateDependencyOnLayerRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobOnLayer depend = - new JobOnLayer(job, jobManager.getLayerDetail(request.getLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnLayerResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getWhatDependsOnThis(JobGetWhatDependsOnThisRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext(JobGetWhatDependsOnThisResponse.newBuilder() - .setDepends(whiteboard.getWhatDependsOnThis(job)).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getWhatThisDependsOn(JobGetWhatThisDependsOnRequest request, - 
StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext(JobGetWhatThisDependsOnResponse.newBuilder() - .setDepends(whiteboard.getWhatThisDependsOn(job)).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getDepends(JobGetDependsRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext( - JobGetDependsResponse.newBuilder().setDepends(whiteboard.getDepends(job)).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getUpdatedFrames(JobGetUpdatedFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - UpdatedFrameCheckResult result = whiteboard.getUpdatedFrames(job, - ServantUtil.convertLayerFilterList(request.getLayerFilter()), request.getLastCheck()); - responseObserver.onNext( - JobGetUpdatedFramesResponse.newBuilder().setUpdatedFrames(result.getUpdatedFrames()) - .setServerTime(result.getServerTime()).setState(result.getState()).build()); - responseObserver.onCompleted(); - - } catch (java.lang.IllegalArgumentException e) { - System.out.println(e); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setMaxRetries(JobSetMaxRetriesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateMaxFrameRetries(job, request.getMaxRetries()); - responseObserver.onNext(JobSetMaxRetriesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void addComment(JobAddCommentRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - Comment newComment = request.getNewComment(); - CommentDetail c = new CommentDetail(); - c.message = newComment.getMessage(); - c.subject = newComment.getSubject(); - c.user = newComment.getUser(); - c.timestamp = null; - commentManager.addComment(job, c); - responseObserver.onNext(JobAddCommentResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void getComments(JobGetCommentsRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - responseObserver.onNext( - JobGetCommentsResponse.newBuilder().setComments(whiteboard.getComments(job)).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void dropDepends(JobDropDependsRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, 
property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchDropDepends(job, request.getTarget(), dependManager)); - responseObserver.onNext(JobDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void setGroup(JobSetGroupRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobDao.updateParent(job, groupManager.getGroupDetail(request.getGroupId())); - responseObserver.onNext(JobSetGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void markAsWaiting(JobMarkAsWaitingRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - jobManagerSupport.markFramesAsWaiting(frameSearchFactory.create(job, request.getReq()), - new Source(request.toString())); - responseObserver.onNext(JobMarkAsWaitingResponse.newBuilder().build()); + private static final Logger logger = LogManager.getLogger(ManageJob.class); + private Whiteboard whiteboard; + private JobManager jobManager; + private GroupManager groupManager; + private JobManagerSupport jobManagerSupport; + private JobDao jobDao; + private JobLauncher jobLauncher; + private DependManager dependManager; + private CommentManager commentManager; + private DispatchQueue manageQueue; + private Dispatcher localDispatcher; + private LocalBookingSupport localBookingSupport; + private FilterManager filterManager; + private JobInterface job; + private FrameSearchFactory frameSearchFactory; + private JobSearchFactory jobSearchFactory; + private final String property = "frame.finished_jobs_readonly"; + @Autowired + private Environment env; + + @Override + public void findJob(JobFindJobRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(JobFindJobResponse.newBuilder() + .setJob(whiteboard.findJob(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getJob(JobGetJobRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(JobGetJobResponse.newBuilder() + .setJob(whiteboard.getJob(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getJobs(JobGetJobsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(JobGetJobsResponse.newBuilder() + .setJobs(whiteboard.getJobs(jobSearchFactory.create(request.getR()))).build()); responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void reorderFrames(JobReorderFramesRequest request, - 
StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchReorderFrames(job, new FrameSet(request.getRange()), - request.getOrder(), jobManagerSupport)); - responseObserver.onNext(JobReorderFramesResponse.newBuilder().build()); + } + + @Override + public void getJobNames(JobGetJobNamesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(JobGetJobNamesResponse.newBuilder() + .addAllNames(whiteboard.getJobNames(jobSearchFactory.create(request.getR()))) + .build()); responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void shutdownIfCompleted(JobShutdownIfCompletedRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - manageQueue.execute(new DispatchShutdownJobIfCompleted(job, jobManagerSupport)); - responseObserver.onNext(JobShutdownIfCompletedResponse.newBuilder().build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void staggerFrames(JobStaggerFramesRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - manageQueue.execute(new DispatchStaggerFrames(job, request.getRange(), request.getStagger(), - jobManagerSupport)); - responseObserver.onNext(JobStaggerFramesResponse.newBuilder().build()); + } + + @Override + public void isJobPending(JobIsJobPendingRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(JobIsJobPendingResponse.newBuilder() + .setValue(whiteboard.isJobPending(request.getName())).build()); responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void addRenderPartition(JobAddRenderPartRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setJobId(job.getId()); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpuUnits(request.getMaxGpus()); - lha.setMaxGpuMemory(request.getMaxGpuMemory()); - lha.setType(RenderPartitionType.JOB_PARTITION); - - if (localBookingSupport.bookLocal(job, request.getHost(), request.getUsername(), lha)) { - try { - RenderPartition renderPart = whiteboard.getRenderPartition(lha); - responseObserver.onNext( - JobAddRenderPartResponse.newBuilder().setRenderPartition(renderPart).build()); + } + + @Override + public void getFrames(JobGetFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + FrameSeq frameSeq = + whiteboard.getFrames(frameSearchFactory.create(job, request.getReq())); + responseObserver.onNext(JobGetFramesResponse.newBuilder().setFrames(frameSeq).build()); responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - 
Status.INTERNAL.withDescription("Failed to allocate render partition to host.") - .asRuntimeException()); - } - } else { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find suitable frames.").asRuntimeException()); - } - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - @Override - public void runFilters(JobRunFiltersRequest request, - StreamObserver responseObserver) { - try { - setupJobData(request.getJob()); - if (attemptChange(env, property, jobManager, job, responseObserver)) { - JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); - filterManager.runFiltersOnJob(jobDetail); - responseObserver.onNext(JobRunFiltersResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - public void addSubscriber(JobAddSubscriberRequest request, - StreamObserver responseStreamObserver) { - try { - setupJobData(request.getJob()); - Set subscribers = Sets.newHashSet(jobManager.getEmail(job).split(",")); - subscribers.add(request.getSubscriber()); - jobManager.updateEmail(job, String.join(",", subscribers)); - responseStreamObserver.onNext(JobAddSubscriberResponse.newBuilder().build()); - responseStreamObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseStreamObserver - .onError(Status.INTERNAL.withDescription("Failed to find job data").asRuntimeException()); - } - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue dispatchQueue) { - this.manageQueue = dispatchQueue; - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public JobLauncher getJobLauncher() { - return jobLauncher; - } - - public void setJobLauncher(JobLauncher jobLauncher) { - this.jobLauncher = jobLauncher; - } - - public CommentManager getCommentManager() { - return commentManager; - } - - public void setCommentManager(CommentManager commentManager) { - this.commentManager = commentManager; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } - - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport 
= localBookingSupport; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getLayers(JobGetLayersRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + LayerSeq layerSeq = whiteboard.getLayers(job); + responseObserver.onNext(JobGetLayersResponse.newBuilder().setLayers(layerSeq).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void kill(JobKillRequest request, StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + manageQueue + .execute(new DispatchJobComplete(job, + new Source(request.toString(), request.getUsername(), request.getPid(), + request.getHostKill(), request.getReason()), + true, jobManagerSupport)); + responseObserver.onNext(JobKillResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void launchSpecAndWait(JobLaunchSpecAndWaitRequest request, + StreamObserver responseObserver) { + try { + JobSpec spec = jobLauncher.parse(request.getSpec()); + jobLauncher.launch(spec); + JobSeq.Builder jobSeqBuilder = JobSeq.newBuilder(); + for (BuildableJob j : spec.getJobs()) { + jobSeqBuilder.addJobs(whiteboard.findJob(j.detail.name)); + } + responseObserver.onNext(JobLaunchSpecAndWaitResponse.newBuilder() + .setJobs(jobSeqBuilder.build()).build()); + responseObserver.onCompleted(); + } catch (Exception e) { + logger.error("Failed to launch and add job.", e); + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to launch and add job: " + e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void launchSpec(JobLaunchSpecRequest request, + StreamObserver responseObserver) { + try { + JobSpec spec = jobLauncher.parse(request.getSpec()); + List result = new ArrayList(8); + for (BuildableJob j : spec.getJobs()) { + result.add(j.detail.name); + } + jobLauncher.queueAndLaunch(spec); + responseObserver.onNext(JobLaunchSpecResponse.newBuilder().addAllNames(result).build()); + responseObserver.onCompleted(); + } catch (Exception e) { + logger.error("Failed to add job to launch queue.", e); + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to add job to launch queue: " + e.getMessage()) + .withCause(e).asRuntimeException()); + } + } + + @Override + public void pause(JobPauseRequest request, StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobManager.setJobPaused(job, true); + responseObserver.onNext(JobPauseResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void resume(JobResumeRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobManager.setJobPaused(job, false); + 
responseObserver.onNext(JobResumeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setMaxCores(JobSetMaxCoresRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateMaxCores(job, Convert.coresToWholeCoreUnits(request.getVal())); + responseObserver.onNext(JobSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setMinCores(JobSetMinCoresRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobDao.updateMinCores(job, Convert.coresToWholeCoreUnits(request.getVal())); + responseObserver.onNext(JobSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setMaxGpus(JobSetMaxGpusRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobDao.updateMaxGpus(job, request.getVal()); + responseObserver.onNext(JobSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setMinGpus(JobSetMinGpusRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + jobDao.updateMinGpus(job, request.getVal()); + responseObserver.onNext(JobSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setPriority(JobSetPriorityRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updatePriority(job, request.getVal()); + responseObserver.onNext(JobSetPriorityResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getCurrent(JobGetCurrentRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + Job currentJob = whiteboard.getJob(job.getId()); + responseObserver.onNext(JobGetCurrentResponse.newBuilder().setJob(currentJob).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void eatFrames(JobEatFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + 
manageQueue.execute( + new DispatchEatFrames(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(JobEatFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void killFrames(JobKillFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchKillFrames(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString(), request.getUsername(), + request.getPid(), request.getHostKill(), + request.getReason()), + jobManagerSupport)); + responseObserver.onNext(JobKillFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void markDoneFrames(JobMarkDoneFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchSatisfyDepends( + frameSearchFactory.create(job, request.getReq()), jobManagerSupport)); + responseObserver.onNext(JobMarkDoneFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void retryFrames(JobRetryFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchRetryFrames(frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(JobRetryFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setAutoEat(JobSetAutoEatRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateAutoEat(job, request.getValue()); + responseObserver.onNext(JobSetAutoEatResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void createDependencyOnFrame(JobCreateDependencyOnFrameRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnFrame depend = + new JobOnFrame(job, jobManager.getFrameDetail(request.getFrame().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnFrameResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } 
catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void createDependencyOnJob(JobCreateDependencyOnJobRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnJob depend = + new JobOnJob(job, jobManager.getJobDetail(request.getOnJob().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnJobResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void createDependencyOnLayer(JobCreateDependencyOnLayerRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnLayer depend = + new JobOnLayer(job, jobManager.getLayerDetail(request.getLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnLayerResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getWhatDependsOnThis(JobGetWhatDependsOnThisRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext(JobGetWhatDependsOnThisResponse.newBuilder() + .setDepends(whiteboard.getWhatDependsOnThis(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getWhatThisDependsOn(JobGetWhatThisDependsOnRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext(JobGetWhatThisDependsOnResponse.newBuilder() + .setDepends(whiteboard.getWhatThisDependsOn(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getDepends(JobGetDependsRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext(JobGetDependsResponse.newBuilder() + .setDepends(whiteboard.getDepends(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getUpdatedFrames(JobGetUpdatedFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + UpdatedFrameCheckResult result = whiteboard.getUpdatedFrames(job, + ServantUtil.convertLayerFilterList(request.getLayerFilter()), + request.getLastCheck()); + responseObserver.onNext(JobGetUpdatedFramesResponse.newBuilder() + .setUpdatedFrames(result.getUpdatedFrames()) + 
.setServerTime(result.getServerTime()).setState(result.getState()).build()); + responseObserver.onCompleted(); + + } catch (java.lang.IllegalArgumentException e) { + System.out.println(e); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setMaxRetries(JobSetMaxRetriesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateMaxFrameRetries(job, request.getMaxRetries()); + responseObserver.onNext(JobSetMaxRetriesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void addComment(JobAddCommentRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + Comment newComment = request.getNewComment(); + CommentDetail c = new CommentDetail(); + c.message = newComment.getMessage(); + c.subject = newComment.getSubject(); + c.user = newComment.getUser(); + c.timestamp = null; + commentManager.addComment(job, c); + responseObserver.onNext(JobAddCommentResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void getComments(JobGetCommentsRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + responseObserver.onNext(JobGetCommentsResponse.newBuilder() + .setComments(whiteboard.getComments(job)).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } - private void setupJobData(Job jobData) { - setJobManager(jobManagerSupport.getJobManager()); - setDependManager(jobManagerSupport.getDependManager()); - job = jobManager.getJob(jobData.getId()); - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - public JobSearchFactory getJobSearchFactory() { - return jobSearchFactory; - } - - public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { - this.jobSearchFactory = jobSearchFactory; - } + @Override + public void dropDepends(JobDropDependsRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue + .execute(new DispatchDropDepends(job, request.getTarget(), dependManager)); + responseObserver.onNext(JobDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setGroup(JobSetGroupRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateParent(job, 
groupManager.getGroupDetail(request.getGroupId())); + responseObserver.onNext(JobSetGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void markAsWaiting(JobMarkAsWaitingRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobManagerSupport.markFramesAsWaiting( + frameSearchFactory.create(job, request.getReq()), + new Source(request.toString())); + responseObserver.onNext(JobMarkAsWaitingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void reorderFrames(JobReorderFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchReorderFrames(job, new FrameSet(request.getRange()), + request.getOrder(), jobManagerSupport)); + responseObserver.onNext(JobReorderFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void shutdownIfCompleted(JobShutdownIfCompletedRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + manageQueue.execute(new DispatchShutdownJobIfCompleted(job, jobManagerSupport)); + responseObserver.onNext(JobShutdownIfCompletedResponse.newBuilder().build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void staggerFrames(JobStaggerFramesRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchStaggerFrames(job, request.getRange(), + request.getStagger(), jobManagerSupport)); + responseObserver.onNext(JobStaggerFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void addRenderPartition(JobAddRenderPartRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setJobId(job.getId()); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuUnits(request.getMaxGpus()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.JOB_PARTITION); + + if (localBookingSupport.bookLocal(job, request.getHost(), request.getUsername(), + lha)) { + try { + RenderPartition renderPart = whiteboard.getRenderPartition(lha); + 
responseObserver.onNext(JobAddRenderPartResponse.newBuilder() + .setRenderPartition(renderPart).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to allocate render partition to host.") + .asRuntimeException()); + } + } else { + responseObserver.onError( + Status.INTERNAL.withDescription("Failed to find suitable frames.") + .asRuntimeException()); + } + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void runFilters(JobRunFiltersRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); + filterManager.runFiltersOnJob(jobDetail); + responseObserver.onNext(JobRunFiltersResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL.withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + public void addSubscriber(JobAddSubscriberRequest request, + StreamObserver responseStreamObserver) { + try { + setupJobData(request.getJob()); + Set subscribers = Sets.newHashSet(jobManager.getEmail(job).split(",")); + subscribers.add(request.getSubscriber()); + jobManager.updateEmail(job, String.join(",", subscribers)); + responseStreamObserver.onNext(JobAddSubscriberResponse.newBuilder().build()); + responseStreamObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseStreamObserver.onError(Status.INTERNAL + .withDescription("Failed to find job data").asRuntimeException()); + } + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue dispatchQueue) { + this.manageQueue = dispatchQueue; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public JobLauncher getJobLauncher() { + return jobLauncher; + } + + public void setJobLauncher(JobLauncher jobLauncher) { + this.jobLauncher = jobLauncher; + } + + public CommentManager getCommentManager() { + return commentManager; + } + + public void setCommentManager(CommentManager commentManager) { + this.commentManager = commentManager; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } + + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = 
localDispatcher; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + private void setupJobData(Job jobData) { + setJobManager(jobManagerSupport.getJobManager()); + setDependManager(jobManagerSupport.getDependManager()); + job = jobManager.getJob(jobData.getId()); + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } + + public JobSearchFactory getJobSearchFactory() { + return jobSearchFactory; + } + + public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { + this.jobSearchFactory = jobSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java index e125ca8c2..0e19a2e43 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java @@ -127,480 +127,484 @@ public class ManageLayer extends LayerInterfaceGrpc.LayerInterfaceImplBase { - private LayerDetail layer; - private FrameSearchInterface frameSearch; - private JobManager jobManager; - private DependManager dependManager; - private JobManagerSupport jobManagerSupport; - private LayerDao layerDao; - private DispatchQueue manageQueue; - private Whiteboard whiteboard; - private LocalBookingSupport localBookingSupport; - private FrameSearchFactory frameSearchFactory; - private final String property = "layer.finished_jobs_readonly"; - @Autowired - private Environment env; - - @Override - public void findLayer(LayerFindLayerRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(LayerFindLayerResponse.newBuilder() - .setLayer(whiteboard.findLayer(request.getJob(), request.getLayer())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getLayer(LayerGetLayerRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(LayerGetLayerResponse.newBuilder() - .setLayer(whiteboard.getLayer(request.getId())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void eatFrames(LayerEatFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute( - new DispatchEatFrames(frameSearch, new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerEatFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void getFrames(LayerGetFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - FrameSeq frames = whiteboard.getFrames(frameSearchFactory.create(layer, 
request.getS())); - responseObserver.onNext(LayerGetFramesResponse.newBuilder().setFrames(frames).build()); - responseObserver.onCompleted(); - } - - @Override - public void killFrames(LayerKillFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - manageQueue.execute( - new DispatchKillFrames(frameSearch, new Source(request.toString(), request.getUsername(), - request.getPid(), request.getHostKill(), request.getReason()), jobManagerSupport)); - responseObserver.onNext(LayerKillFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void markdoneFrames(LayerMarkdoneFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchSatisfyDepends(layer, jobManagerSupport)); - responseObserver.onNext(LayerMarkdoneFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void retryFrames(LayerRetryFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute( - new DispatchRetryFrames(frameSearch, new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerRetryFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setTags(LayerSetTagsRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateLayerTags(layer, new HashSet<>(request.getTagsList())); - responseObserver.onNext(LayerSetTagsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinCores(LayerSetMinCoresRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.setLayerMinCores(layer, Convert.coresToCoreUnits(request.getCores())); - responseObserver.onNext(LayerSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinGpus(LayerSetMinGpusRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.setLayerMinGpus(layer, request.getMinGpus()); - responseObserver.onNext(LayerSetMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinMemory(LayerSetMinMemoryRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateLayerMinMemory(layer, request.getMemory()); - responseObserver.onNext(LayerSetMinMemoryResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMinGpuMemory(LayerSetMinGpuMemoryRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateLayerMinGpuMemory(layer, request.getGpuMemory()); - responseObserver.onNext(LayerSetMinGpuMemoryResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void 
createDependencyOnFrame(LayerCreateDependOnFrameRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LayerOnFrame depend = - new LayerOnFrame(layer, jobManager.getFrameDetail(request.getFrame().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnFrameResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createDependencyOnJob(LayerCreateDependOnJobRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LayerOnJob depend = new LayerOnJob(layer, jobManager.getJobDetail(request.getJob().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnJobResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createDependencyOnLayer(LayerCreateDependOnLayerRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LayerOnLayer depend = - new LayerOnLayer(layer, jobManager.getLayerDetail(request.getDependOnLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnLayerResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } - - @Override - public void createFrameByFrameDependency(LayerCreateFrameByFrameDependRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - FrameByFrame depend = - new FrameByFrame(layer, jobManager.getLayerDetail(request.getDependLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateFrameByFrameDependResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)).build()); - responseObserver.onCompleted(); - } - } - - @Override - public void getWhatDependsOnThis(LayerGetWhatDependsOnThisRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetWhatDependsOnThisResponse.newBuilder() - .setDepends(whiteboard.getWhatDependsOnThis(layer)).build()); - responseObserver.onCompleted(); - } - - @Override - public void getWhatThisDependsOn(LayerGetWhatThisDependsOnRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetWhatThisDependsOnResponse.newBuilder() - .setDepends(whiteboard.getWhatThisDependsOn(layer)).build()); - responseObserver.onCompleted(); - } - - @Override - public void dropDepends(LayerDropDependsRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchDropDepends(layer, request.getTarget(), dependManager)); - responseObserver.onNext(LayerDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void dropLimit(LayerDropLimitRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.dropLimit(layer, 
request.getLimitId()); - responseObserver.onNext(LayerDropLimitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void reorderFrames(LayerReorderFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchReorderFrames(layer, new FrameSet(request.getRange()), - request.getOrder(), jobManagerSupport)); - responseObserver.onNext(LayerReorderFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void staggerFrames(LayerStaggerFramesRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - manageQueue.execute(new DispatchStaggerFrames(layer, request.getRange(), request.getStagger(), - jobManagerSupport)); - responseObserver.onNext(LayerStaggerFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setThreadable(LayerSetThreadableRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateThreadable(layer, request.getThreadable()); - responseObserver.onNext(LayerSetThreadableResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setTimeout(LayerSetTimeoutRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateTimeout(layer, request.getTimeout()); - responseObserver.onNext(LayerSetTimeoutResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setTimeoutLLU(LayerSetTimeoutLLURequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.updateTimeoutLLU(layer, request.getTimeoutLlu()); - responseObserver.onNext(LayerSetTimeoutLLUResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void addLimit(LayerAddLimitRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - layerDao.addLimit(layer, request.getLimitId()); - responseObserver.onNext(LayerAddLimitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void addRenderPartition(LayerAddRenderPartitionRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpuUnits(request.getMaxGpus()); - lha.setMaxGpuMemory(request.getMaxGpuMemory()); - lha.setType(RenderPartitionType.LAYER_PARTITION); - if (localBookingSupport.bookLocal(layer, request.getHost(), request.getUsername(), lha)) { - RenderPartition partition = whiteboard.getRenderPartition(lha); - responseObserver.onNext( - LayerAddRenderPartitionResponse.newBuilder().setRenderPartition(partition).build()); + private LayerDetail layer; + private FrameSearchInterface 
frameSearch; + private JobManager jobManager; + private DependManager dependManager; + private JobManagerSupport jobManagerSupport; + private LayerDao layerDao; + private DispatchQueue manageQueue; + private Whiteboard whiteboard; + private LocalBookingSupport localBookingSupport; + private FrameSearchFactory frameSearchFactory; + private final String property = "layer.finished_jobs_readonly"; + @Autowired + private Environment env; + + @Override + public void findLayer(LayerFindLayerRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(LayerFindLayerResponse.newBuilder() + .setLayer(whiteboard.findLayer(request.getJob(), request.getLayer())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getLayer(LayerGetLayerRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(LayerGetLayerResponse.newBuilder() + .setLayer(whiteboard.getLayer(request.getId())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void eatFrames(LayerEatFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchEatFrames(frameSearch, new Source(request.toString()), + jobManagerSupport)); + responseObserver.onNext(LayerEatFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void getFrames(LayerGetFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + FrameSeq frames = whiteboard.getFrames(frameSearchFactory.create(layer, request.getS())); + responseObserver.onNext(LayerGetFramesResponse.newBuilder().setFrames(frames).build()); + responseObserver.onCompleted(); + } + + @Override + public void killFrames(LayerKillFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + manageQueue + .execute(new DispatchKillFrames( + frameSearch, new Source(request.toString(), request.getUsername(), + request.getPid(), request.getHostKill(), request.getReason()), + jobManagerSupport)); + responseObserver.onNext(LayerKillFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void markdoneFrames(LayerMarkdoneFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchSatisfyDepends(layer, jobManagerSupport)); + responseObserver.onNext(LayerMarkdoneFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void retryFrames(LayerRetryFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchRetryFrames(frameSearch, new Source(request.toString()), + jobManagerSupport)); + responseObserver.onNext(LayerRetryFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setTags(LayerSetTagsRequest request, + StreamObserver 
responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerTags(layer, new HashSet<>(request.getTagsList())); + responseObserver.onNext(LayerSetTagsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinCores(LayerSetMinCoresRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMinCores(layer, Convert.coresToCoreUnits(request.getCores())); + responseObserver.onNext(LayerSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinGpus(LayerSetMinGpusRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMinGpus(layer, request.getMinGpus()); + responseObserver.onNext(LayerSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinMemory(LayerSetMinMemoryRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerMinMemory(layer, request.getMemory()); + responseObserver.onNext(LayerSetMinMemoryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinGpuMemory(LayerSetMinGpuMemoryRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerMinGpuMemory(layer, request.getGpuMemory()); + responseObserver.onNext(LayerSetMinGpuMemoryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createDependencyOnFrame(LayerCreateDependOnFrameRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnFrame depend = + new LayerOnFrame(layer, jobManager.getFrameDetail(request.getFrame().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnFrameResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createDependencyOnJob(LayerCreateDependOnJobRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnJob depend = + new LayerOnJob(layer, jobManager.getJobDetail(request.getJob().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnJobResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void createDependencyOnLayer(LayerCreateDependOnLayerRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnLayer depend = new LayerOnLayer(layer, + jobManager.getLayerDetail(request.getDependOnLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnLayerResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + 
responseObserver.onCompleted(); + } + } + + @Override + public void createFrameByFrameDependency(LayerCreateFrameByFrameDependRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + FrameByFrame depend = new FrameByFrame(layer, + jobManager.getLayerDetail(request.getDependLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateFrameByFrameDependResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)).build()); + responseObserver.onCompleted(); + } + } + + @Override + public void getWhatDependsOnThis(LayerGetWhatDependsOnThisRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetWhatDependsOnThisResponse.newBuilder() + .setDepends(whiteboard.getWhatDependsOnThis(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getWhatThisDependsOn(LayerGetWhatThisDependsOnRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetWhatThisDependsOnResponse.newBuilder() + .setDepends(whiteboard.getWhatThisDependsOn(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void dropDepends(LayerDropDependsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchDropDepends(layer, request.getTarget(), dependManager)); + responseObserver.onNext(LayerDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void dropLimit(LayerDropLimitRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.dropLimit(layer, request.getLimitId()); + responseObserver.onNext(LayerDropLimitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void reorderFrames(LayerReorderFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchReorderFrames(layer, new FrameSet(request.getRange()), + request.getOrder(), jobManagerSupport)); + responseObserver.onNext(LayerReorderFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void staggerFrames(LayerStaggerFramesRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchStaggerFrames(layer, request.getRange(), + request.getStagger(), jobManagerSupport)); + responseObserver.onNext(LayerStaggerFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setThreadable(LayerSetThreadableRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateThreadable(layer, request.getThreadable()); + responseObserver.onNext(LayerSetThreadableResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setTimeout(LayerSetTimeoutRequest request, + StreamObserver responseObserver) { + 
updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateTimeout(layer, request.getTimeout()); + responseObserver.onNext(LayerSetTimeoutResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setTimeoutLLU(LayerSetTimeoutLLURequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateTimeoutLLU(layer, request.getTimeoutLlu()); + responseObserver.onNext(LayerSetTimeoutLLUResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void addLimit(LayerAddLimitRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.addLimit(layer, request.getLimitId()); + responseObserver.onNext(LayerAddLimitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void addRenderPartition(LayerAddRenderPartitionRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuUnits(request.getMaxGpus()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.LAYER_PARTITION); + if (localBookingSupport.bookLocal(layer, request.getHost(), request.getUsername(), + lha)) { + RenderPartition partition = whiteboard.getRenderPartition(lha); + responseObserver.onNext(LayerAddRenderPartitionResponse.newBuilder() + .setRenderPartition(partition).build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to find suitable frames.").asRuntimeException()); + } + } + + } + + @Override + public void registerOutputPath(LayerRegisterOutputPathRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.registerLayerOutput(layer, request.getSpec()); + responseObserver.onNext(LayerRegisterOutputPathResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void getLimits(LayerGetLimitsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetLimitsResponse.newBuilder() + .addAllLimits(whiteboard.getLimits(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void getOutputPaths(LayerGetOutputPathsRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + responseObserver.onNext(LayerGetOutputPathsResponse.newBuilder() + .addAllOutputPaths(jobManager.getLayerOutputs(layer)).build()); + responseObserver.onCompleted(); + } + + @Override + public void enableMemoryOptimizer(LayerEnableMemoryOptimizerRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.enableMemoryOptimizer(layer, request.getValue()); + responseObserver.onNext(LayerEnableMemoryOptimizerResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + 
@Override + public void setMaxCores(LayerSetMaxCoresRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMaxCores(layer, Convert.coresToWholeCoreUnits(request.getCores())); + responseObserver.onNext(LayerSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMaxGpus(LayerSetMaxGpusRequest request, + StreamObserver responseObserver) { + updateLayer(request.getLayer()); + jobManager.setLayerMaxGpus(layer, request.getMaxGpus()); + responseObserver.onNext(LayerSetMaxGpusResponse.newBuilder().build()); responseObserver.onCompleted(); - } else { - responseObserver.onError(Status.INTERNAL.withDescription("Failed to find suitable frames.") - .asRuntimeException()); - } - } - - } - - @Override - public void registerOutputPath(LayerRegisterOutputPathRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.registerLayerOutput(layer, request.getSpec()); - responseObserver.onNext(LayerRegisterOutputPathResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void getLimits(LayerGetLimitsRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext( - LayerGetLimitsResponse.newBuilder().addAllLimits(whiteboard.getLimits(layer)).build()); - responseObserver.onCompleted(); - } - - @Override - public void getOutputPaths(LayerGetOutputPathsRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - responseObserver.onNext(LayerGetOutputPathsResponse.newBuilder() - .addAllOutputPaths(jobManager.getLayerOutputs(layer)).build()); - responseObserver.onCompleted(); - } - - @Override - public void enableMemoryOptimizer(LayerEnableMemoryOptimizerRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.enableMemoryOptimizer(layer, request.getValue()); - responseObserver.onNext(LayerEnableMemoryOptimizerResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMaxCores(LayerSetMaxCoresRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - if (attemptChange(env, property, jobManager, layer, responseObserver)) { - jobManager.setLayerMaxCores(layer, Convert.coresToWholeCoreUnits(request.getCores())); - responseObserver.onNext(LayerSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - } - - @Override - public void setMaxGpus(LayerSetMaxGpusRequest request, - StreamObserver responseObserver) { - updateLayer(request.getLayer()); - jobManager.setLayerMaxGpus(layer, request.getMaxGpus()); - responseObserver.onNext(LayerSetMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue dispatchQueue) { - this.manageQueue = dispatchQueue; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - 
this.jobManager = jobManager; - } - - public LayerDetail getLayer() { - return layer; - } - - public void setLayer(LayerDetail layer) { - this.layer = layer; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - private void updateLayer(Layer layerData) { - setJobManager(jobManagerSupport.getJobManager()); - setDependManager(jobManagerSupport.getDependManager()); - layer = layerDao.getLayerDetail(layerData.getId()); - frameSearch = frameSearchFactory.create(layer); - } + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue dispatchQueue) { + this.manageQueue = dispatchQueue; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public LayerDetail getLayer() { + return layer; + } + + public void setLayer(LayerDetail layer) { + this.layer = layer; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } + + private void updateLayer(Layer layerData) { + setJobManager(jobManagerSupport.getJobManager()); + setDependManager(jobManagerSupport.getDependManager()); + layer = layerDao.getLayerDetail(layerData.getId()); + frameSearch = frameSearchFactory.create(layer); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java index 20013b510..cf49034dc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLimit.java @@ -21,80 +21,82 @@ import com.imageworks.spcue.service.Whiteboard; public class ManageLimit extends LimitInterfaceGrpc.LimitInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; + private AdminManager adminManager; + private Whiteboard whiteboard; - @Override - public void create(LimitCreateRequest request, - StreamObserver responseObserver) { - String limitId = adminManager.createLimit(request.getName(), request.getMaxValue()); - LimitCreateResponse response = - LimitCreateResponse.newBuilder().setLimit(whiteboard.getLimit(limitId)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } + @Override + public void create(LimitCreateRequest request, + StreamObserver responseObserver) { + String limitId = adminManager.createLimit(request.getName(), request.getMaxValue()); + LimitCreateResponse response = + LimitCreateResponse.newBuilder().setLimit(whiteboard.getLimit(limitId)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - @Override - public void delete(LimitDeleteRequest request, - StreamObserver responseObserver) { - adminManager.deleteLimit(adminManager.findLimit(request.getName())); - responseObserver.onNext(LimitDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(LimitDeleteRequest request, + StreamObserver responseObserver) { + adminManager.deleteLimit(adminManager.findLimit(request.getName())); + responseObserver.onNext(LimitDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void find(LimitFindRequest request, StreamObserver responseObserver) { - LimitFindResponse response = - LimitFindResponse.newBuilder().setLimit(whiteboard.findLimit(request.getName())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } + @Override + public void find(LimitFindRequest request, StreamObserver responseObserver) { + LimitFindResponse response = LimitFindResponse.newBuilder() + .setLimit(whiteboard.findLimit(request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - @Override - public void get(LimitGetRequest request, StreamObserver responseObserver) { - LimitGetResponse response = - LimitGetResponse.newBuilder().setLimit(whiteboard.getLimit(request.getId())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } + @Override + public void get(LimitGetRequest request, StreamObserver responseObserver) { + LimitGetResponse response = LimitGetResponse.newBuilder() + .setLimit(whiteboard.getLimit(request.getId())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - @Override - public void getAll(LimitGetAllRequest request, - StreamObserver responseObserver) { - responseObserver - .onNext(LimitGetAllResponse.newBuilder().addAllLimits(whiteboard.getLimits()).build()); - responseObserver.onCompleted(); - } + @Override + public void getAll(LimitGetAllRequest request, + StreamObserver responseObserver) { + responseObserver.onNext( + LimitGetAllResponse.newBuilder().addAllLimits(whiteboard.getLimits()).build()); + responseObserver.onCompleted(); + } - @Override - public void rename(LimitRenameRequest request, - StreamObserver responseObserver) { - adminManager.setLimitName(adminManager.findLimit(request.getOldName()), request.getNewName()); - 
responseObserver.onNext(LimitRenameResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void rename(LimitRenameRequest request, + StreamObserver responseObserver) { + adminManager.setLimitName(adminManager.findLimit(request.getOldName()), + request.getNewName()); + responseObserver.onNext(LimitRenameResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void setMaxValue(LimitSetMaxValueRequest request, - StreamObserver responseObserver) { - adminManager.setLimitMaxValue(adminManager.findLimit(request.getName()), request.getMaxValue()); - responseObserver.onNext(LimitSetMaxValueResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void setMaxValue(LimitSetMaxValueRequest request, + StreamObserver responseObserver) { + adminManager.setLimitMaxValue(adminManager.findLimit(request.getName()), + request.getMaxValue()); + responseObserver.onNext(LimitSetMaxValueResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public AdminManager getAdminManager() { - return adminManager; - } + public AdminManager getAdminManager() { + return adminManager; + } - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } - public Whiteboard getWhiteboard() { - return whiteboard; - } + public Whiteboard getWhiteboard() { + return whiteboard; + } - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java index 842f98f8c..d8552d456 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageMatcher.java @@ -31,50 +31,50 @@ public class ManageMatcher extends MatcherInterfaceGrpc.MatcherInterfaceImplBase { - private FilterManager filterManager; - private Whiteboard whiteboard; + private FilterManager filterManager; + private Whiteboard whiteboard; - public void delete(MatcherDeleteRequest request, - StreamObserver responseObserver) { - filterManager.deleteMatcher(filterManager.getMatcher(request.getMatcher().getId())); - responseObserver.onNext(MatcherDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + public void delete(MatcherDeleteRequest request, + StreamObserver responseObserver) { + filterManager.deleteMatcher(filterManager.getMatcher(request.getMatcher().getId())); + responseObserver.onNext(MatcherDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public void getParentFilter(MatcherGetParentFilterRequest request, - StreamObserver responseObserver) { - MatcherEntity matcherEntity = filterManager.getMatcher(request.getMatcher().getId()); - MatcherGetParentFilterResponse response = MatcherGetParentFilterResponse.newBuilder() - .setFilter(whiteboard.getFilter(matcherEntity)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } + public void getParentFilter(MatcherGetParentFilterRequest request, + StreamObserver responseObserver) { + MatcherEntity matcherEntity = filterManager.getMatcher(request.getMatcher().getId()); + MatcherGetParentFilterResponse response = MatcherGetParentFilterResponse.newBuilder() + 
.setFilter(whiteboard.getFilter(matcherEntity)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - public void commit(MatcherCommitRequest request, - StreamObserver responseObserver) { - Matcher newMatcherData = request.getMatcher(); - String id = newMatcherData.getId(); - MatcherEntity oldMatcher = filterManager.getMatcher(id); - MatcherEntity newMatcher = - MatcherEntity.build(filterManager.getFilter(oldMatcher), newMatcherData, id); - filterManager.updateMatcher(newMatcher); - responseObserver.onNext(MatcherCommitResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + public void commit(MatcherCommitRequest request, + StreamObserver responseObserver) { + Matcher newMatcherData = request.getMatcher(); + String id = newMatcherData.getId(); + MatcherEntity oldMatcher = filterManager.getMatcher(id); + MatcherEntity newMatcher = + MatcherEntity.build(filterManager.getFilter(oldMatcher), newMatcherData, id); + filterManager.updateMatcher(newMatcher); + responseObserver.onNext(MatcherCommitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public FilterManager getFilterManager() { - return filterManager; - } + public FilterManager getFilterManager() { + return filterManager; + } - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } - public Whiteboard getWhiteboard() { - return whiteboard; - } + public Whiteboard getWhiteboard() { + return whiteboard; + } - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java index 6b157351b..7661bfb6e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageOwner.java @@ -40,105 +40,105 @@ public class ManageOwner extends OwnerInterfaceGrpc.OwnerInterfaceImplBase { - private HostManager hostManager; - private OwnerManager ownerManager; - private Whiteboard whiteboard; - private AdminManager adminManager; - - @Override - public void getOwner(OwnerGetOwnerRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(OwnerGetOwnerResponse.newBuilder() - .setOwner(whiteboard.getOwner(request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + private HostManager hostManager; + private OwnerManager ownerManager; + private Whiteboard whiteboard; + private AdminManager adminManager; + + @Override + public void getOwner(OwnerGetOwnerRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(OwnerGetOwnerResponse.newBuilder() + .setOwner(whiteboard.getOwner(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void delete(OwnerDeleteRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + 
ownerManager.deleteOwner((owner)); + OwnerDeleteResponse response = OwnerDeleteResponse.newBuilder().build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getDeeds(OwnerGetDeedsRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + OwnerGetDeedsResponse response = + OwnerGetDeedsResponse.newBuilder().setDeeds(whiteboard.getDeeds(owner)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getHosts(OwnerGetHostsRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + OwnerGetHostsResponse response = + OwnerGetHostsResponse.newBuilder().setHosts(whiteboard.getHosts(owner)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void takeOwnership(OwnerTakeOwnershipRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + ownerManager.takeOwnership(owner, hostManager.findHost(request.getHost())); + responseObserver.onNext(OwnerTakeOwnershipResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setShow(OwnerSetShowRequest request, + StreamObserver responseObserver) { + OwnerEntity owner = getOwnerById(request.getOwner().getId()); + ownerManager.setShow(owner, adminManager.findShowEntity(request.getShow())); + responseObserver.onNext(OwnerSetShowResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public OwnerManager getOwnerManager() { + return ownerManager; + } + + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = ownerManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + private OwnerEntity getOwnerById(String id) { + return ownerManager.getOwner(id); } - } - - @Override - public void delete(OwnerDeleteRequest request, - StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - ownerManager.deleteOwner((owner)); - OwnerDeleteResponse response = OwnerDeleteResponse.newBuilder().build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getDeeds(OwnerGetDeedsRequest request, - StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - OwnerGetDeedsResponse response = - OwnerGetDeedsResponse.newBuilder().setDeeds(whiteboard.getDeeds(owner)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getHosts(OwnerGetHostsRequest request, - StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - OwnerGetHostsResponse response = - OwnerGetHostsResponse.newBuilder().setHosts(whiteboard.getHosts(owner)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void takeOwnership(OwnerTakeOwnershipRequest request, - StreamObserver 
responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - ownerManager.takeOwnership(owner, hostManager.findHost(request.getHost())); - responseObserver.onNext(OwnerTakeOwnershipResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setShow(OwnerSetShowRequest request, - StreamObserver responseObserver) { - OwnerEntity owner = getOwnerById(request.getOwner().getId()); - ownerManager.setShow(owner, adminManager.findShowEntity(request.getShow())); - responseObserver.onNext(OwnerSetShowResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public OwnerManager getOwnerManager() { - return ownerManager; - } - - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - private OwnerEntity getOwnerById(String id) { - return ownerManager.getOwner(id); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java index 3df42fcef..4903eb8e9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageProc.java @@ -67,246 +67,249 @@ public class ManageProc extends ProcInterfaceGrpc.ProcInterfaceImplBase { - private ProcDao procDao; - private Whiteboard whiteboard; - private JobManagerSupport jobManagerSupport; - private JobManager jobManager; - private GroupManager groupManager; - private RedirectManager redirectManager; - private ProcSearchFactory procSearchFactory; - - @Override - public void getProcs(ProcGetProcsRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(ProcGetProcsResponse.newBuilder() - .setProcs(whiteboard.getProcs(procSearchFactory.create(request.getR()))).build()); - responseObserver.onCompleted(); - } - - @Override - public void unbookProcs(ProcUnbookProcsRequest request, - StreamObserver responseObserver) { - ProcSearchInterface procSearch = procSearchFactory.create(request.getR()); - procSearch.sortByBookedTime(); - responseObserver.onNext(ProcUnbookProcsResponse.newBuilder().setNumProcs(jobManagerSupport - .unbookProcs(procSearch, request.getKill(), new Source(request.toString()))).build()); - responseObserver.onCompleted(); - } - - @Override - public void unbookToGroup(ProcUnbookToGroupRequest request, - StreamObserver responseObserver) { - if (request.getR().getMaxResultsCount() == 0) { - throw new RuntimeException( - "You must specify the number of procs to unbook " + "within the ProcSearchCriteria."); + private ProcDao procDao; + private Whiteboard whiteboard; + private JobManagerSupport jobManagerSupport; + private JobManager jobManager; + private GroupManager groupManager; + private RedirectManager redirectManager; + private ProcSearchFactory procSearchFactory; + + @Override + public void getProcs(ProcGetProcsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(ProcGetProcsResponse.newBuilder() + 
.setProcs(whiteboard.getProcs(procSearchFactory.create(request.getR()))).build()); + responseObserver.onCompleted(); } - GroupInterface g = groupManager.getGroup(request.getGroup().getId()); - List procs = redirectManager.addRedirect(request.getR(), g, request.getKill(), - new Source(request.toString())); - responseObserver - .onNext(ProcUnbookToGroupResponse.newBuilder().setNumProcs(procs.size()).build()); - responseObserver.onCompleted(); - } - - @Override - public void unbookToJob(ProcUnbookToJobRequest request, - StreamObserver responseObserver) { - if (request.getR().getMaxResultsCount() == 0) { - throw new RuntimeException( - "You must specify the number of procs to unbook " + "within the ProcSearchCriteria."); + @Override + public void unbookProcs(ProcUnbookProcsRequest request, + StreamObserver responseObserver) { + ProcSearchInterface procSearch = procSearchFactory.create(request.getR()); + procSearch.sortByBookedTime(); + responseObserver.onNext(ProcUnbookProcsResponse.newBuilder().setNumProcs(jobManagerSupport + .unbookProcs(procSearch, request.getKill(), new Source(request.toString()))) + .build()); + responseObserver.onCompleted(); } - List jobs = new ArrayList(request.getJobs().getJobsCount()); - - for (Job job : request.getJobs().getJobsList()) { - try { - jobs.add(jobManager.getJob(job.getId())); - } catch (EmptyResultDataAccessException e) { - // just eat it, just eat it. - // Open up your mouth and feed it. - // Have a banana. Have a whole bunch. - // It doesn't matter, when you had lunch. - // just eat it, just eat it - // get yourself and egg and beat it - } + @Override + public void unbookToGroup(ProcUnbookToGroupRequest request, + StreamObserver responseObserver) { + if (request.getR().getMaxResultsCount() == 0) { + throw new RuntimeException("You must specify the number of procs to unbook " + + "within the ProcSearchCriteria."); + } + + GroupInterface g = groupManager.getGroup(request.getGroup().getId()); + List procs = redirectManager.addRedirect(request.getR(), g, request.getKill(), + new Source(request.toString())); + responseObserver + .onNext(ProcUnbookToGroupResponse.newBuilder().setNumProcs(procs.size()).build()); + responseObserver.onCompleted(); } - int returnVal; - if (jobs.size() == 0) { - returnVal = 0; - } else { - List procs = redirectManager.addRedirect(request.getR(), jobs, request.getKill(), - new Source(request.toString())); + @Override + public void unbookToJob(ProcUnbookToJobRequest request, + StreamObserver responseObserver) { + if (request.getR().getMaxResultsCount() == 0) { + throw new RuntimeException("You must specify the number of procs to unbook " + + "within the ProcSearchCriteria."); + } + + List jobs = new ArrayList(request.getJobs().getJobsCount()); + + for (Job job : request.getJobs().getJobsList()) { + try { + jobs.add(jobManager.getJob(job.getId())); + } catch (EmptyResultDataAccessException e) { + // just eat it, just eat it. + // Open up your mouth and feed it. + // Have a banana. Have a whole bunch. + // It doesn't matter, when you had lunch. 
+ // just eat it, just eat it + // get yourself and egg and beat it + } + } + + int returnVal; + if (jobs.size() == 0) { + returnVal = 0; + } else { + List procs = redirectManager.addRedirect(request.getR(), jobs, + request.getKill(), new Source(request.toString())); + + returnVal = procs.size(); + } + responseObserver + .onNext(ProcUnbookToJobResponse.newBuilder().setNumProcs(returnVal).build()); + responseObserver.onCompleted(); + } + + @Override + public void getFrame(ProcGetFrameRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + Frame frame = whiteboard.getFrame(procDao.getCurrentFrameId(proc)); + ProcGetFrameResponse response = ProcGetFrameResponse.newBuilder().setFrame(frame).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getHost(ProcGetHostRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + ProcGetHostResponse response = ProcGetHostResponse.newBuilder() + .setHost(whiteboard.getHost(proc.getHostId())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getJob(ProcGetJobRequest request, + StreamObserver responseObserver) { + try { + VirtualProc proc = getVirtualProc(request.getProc()); + ProcGetJobResponse response = ProcGetJobResponse.newBuilder() + .setJob(whiteboard.getJob(procDao.getCurrentJobId(proc))).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getLayer(ProcGetLayerRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + ProcGetLayerResponse response = ProcGetLayerResponse.newBuilder() + .setLayer(whiteboard.getLayer(procDao.getCurrentLayerId(proc))).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } - returnVal = procs.size(); + @Override + public void kill(ProcKillRequest request, StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + String message = "Kill Proc on " + proc.getProcId(); + jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), true, + new Source(message)); + responseObserver.onNext(ProcKillResponse.newBuilder().build()); + responseObserver.onCompleted(); } - responseObserver.onNext(ProcUnbookToJobResponse.newBuilder().setNumProcs(returnVal).build()); - responseObserver.onCompleted(); - } - - @Override - public void getFrame(ProcGetFrameRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - Frame frame = whiteboard.getFrame(procDao.getCurrentFrameId(proc)); - ProcGetFrameResponse response = ProcGetFrameResponse.newBuilder().setFrame(frame).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getHost(ProcGetHostRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - ProcGetHostResponse response = - ProcGetHostResponse.newBuilder().setHost(whiteboard.getHost(proc.getHostId())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getJob(ProcGetJobRequest request, - StreamObserver responseObserver) { - try { - VirtualProc proc 
= getVirtualProc(request.getProc()); - ProcGetJobResponse response = ProcGetJobResponse.newBuilder() - .setJob(whiteboard.getJob(procDao.getCurrentJobId(proc))).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + + @Override + public void unbook(ProcUnbookRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + procDao.unbookProc(proc); + if (request.getKill()) { + String message = "Kill Proc on " + proc.getProcId(); + jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), true, + new Source(message)); + } + responseObserver.onNext(ProcUnbookResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void redirectToGroup(ProcRedirectToGroupRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + VirtualProc p = procDao.getVirtualProc(proc.getProcId()); + GroupInterface g = groupManager.getGroup(request.getGroupId()); + String message = + "redirectToGroup called on " + proc.getProcId() + " with Group " + g.getGroupId(); + boolean value = redirectManager.addRedirect(p, g, request.getKill(), new Source(message)); + responseObserver.onNext(ProcRedirectToGroupResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); } - } - - @Override - public void getLayer(ProcGetLayerRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - ProcGetLayerResponse response = ProcGetLayerResponse.newBuilder() - .setLayer(whiteboard.getLayer(procDao.getCurrentLayerId(proc))).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void kill(ProcKillRequest request, StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - String message = "Kill Proc on " + proc.getProcId(); - jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), true, - new Source(message)); - responseObserver.onNext(ProcKillResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void unbook(ProcUnbookRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - procDao.unbookProc(proc); - if (request.getKill()) { - String message = "Kill Proc on " + proc.getProcId(); - jobManagerSupport.unbookProc(procDao.getVirtualProc(proc.getProcId()), true, - new Source(message)); + + @Override + public void redirectToJob(ProcRedirectToJobRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + VirtualProc p = procDao.getVirtualProc(proc.getId()); + JobInterface j = jobManager.getJob(request.getJobId()); + String message = + "redirectToJob called on " + proc.getProcId() + " with Job " + j.getJobId(); + boolean value = redirectManager.addRedirect(p, j, request.getKill(), new Source(message)); + responseObserver.onNext(ProcRedirectToJobResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + @Override + public void clearRedirect(ProcClearRedirectRequest request, + StreamObserver responseObserver) { + VirtualProc proc = getVirtualProc(request.getProc()); + procDao.setUnbookState(proc, false); + boolean value = redirectManager.removeRedirect(proc); + 
responseObserver.onNext(ProcClearRedirectResponse.newBuilder().setValue(value).build()); + responseObserver.onCompleted(); + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + private VirtualProc getVirtualProc(Proc proc) { + return procDao.getVirtualProc(proc.getId()); + } + + public ProcSearchFactory getProcSearchFactory() { + return procSearchFactory; + } + + public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { + this.procSearchFactory = procSearchFactory; } - responseObserver.onNext(ProcUnbookResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void redirectToGroup(ProcRedirectToGroupRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - VirtualProc p = procDao.getVirtualProc(proc.getProcId()); - GroupInterface g = groupManager.getGroup(request.getGroupId()); - String message = - "redirectToGroup called on " + proc.getProcId() + " with Group " + g.getGroupId(); - boolean value = redirectManager.addRedirect(p, g, request.getKill(), new Source(message)); - responseObserver.onNext(ProcRedirectToGroupResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - @Override - public void redirectToJob(ProcRedirectToJobRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - VirtualProc p = procDao.getVirtualProc(proc.getId()); - JobInterface j = jobManager.getJob(request.getJobId()); - String message = "redirectToJob called on " + proc.getProcId() + " with Job " + j.getJobId(); - boolean value = redirectManager.addRedirect(p, j, request.getKill(), new Source(message)); - responseObserver.onNext(ProcRedirectToJobResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - @Override - public void clearRedirect(ProcClearRedirectRequest request, - StreamObserver responseObserver) { - VirtualProc proc = getVirtualProc(request.getProc()); - procDao.setUnbookState(proc, false); - boolean value = redirectManager.removeRedirect(proc); - responseObserver.onNext(ProcClearRedirectResponse.newBuilder().setValue(value).build()); - responseObserver.onCompleted(); - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public JobManagerSupport getJobManagerSupport() { - return 
jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - private VirtualProc getVirtualProc(Proc proc) { - return procDao.getVirtualProc(proc.getId()); - } - - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } - - public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java index b35159a3a..6176d58cc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java @@ -27,38 +27,38 @@ import com.imageworks.spcue.service.BookingManager; public class ManageRenderPartition - extends RenderPartitionInterfaceGrpc.RenderPartitionInterfaceImplBase { + extends RenderPartitionInterfaceGrpc.RenderPartitionInterfaceImplBase { - private BookingManager bookingManager; + private BookingManager bookingManager; - @Override - public void delete(RenderPartDeleteRequest request, - StreamObserver responseObserver) { - bookingManager - .deactivateLocalHostAssignment(getLocalHostAssignment(request.getRenderPartition())); - responseObserver.onNext(RenderPartDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(RenderPartDeleteRequest request, + StreamObserver responseObserver) { + bookingManager.deactivateLocalHostAssignment( + getLocalHostAssignment(request.getRenderPartition())); + responseObserver.onNext(RenderPartDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void setMaxResources(RenderPartSetMaxResourcesRequest request, - StreamObserver responseObserver) { - LocalHostAssignment localJobAssign = getLocalHostAssignment(request.getRenderPartition()); - bookingManager.setMaxResources(localJobAssign, request.getCores(), request.getMemory(), - request.getGpus(), request.getGpuMemory()); - responseObserver.onNext(RenderPartSetMaxResourcesResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void setMaxResources(RenderPartSetMaxResourcesRequest request, + StreamObserver responseObserver) { + LocalHostAssignment localJobAssign = getLocalHostAssignment(request.getRenderPartition()); + bookingManager.setMaxResources(localJobAssign, request.getCores(), request.getMemory(), + request.getGpus(), request.getGpuMemory()); + responseObserver.onNext(RenderPartSetMaxResourcesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public BookingManager getBookingManager() { - return bookingManager; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } + public void 
setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } - private LocalHostAssignment getLocalHostAssignment(RenderPartition renderPartition) { - return bookingManager.getLocalHostAssignment(renderPartition.getId()); - } + private LocalHostAssignment getLocalHostAssignment(RenderPartition renderPartition) { + return bookingManager.getLocalHostAssignment(renderPartition.getId()); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java index 534b5edca..b9f886fb3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java @@ -40,99 +40,99 @@ public class ManageService extends ServiceInterfaceGrpc.ServiceInterfaceImplBase { - private ServiceManager serviceManager; - private Whiteboard whiteboard; - - @Override - public void createService(ServiceCreateServiceRequest request, - StreamObserver responseObserver) { - ServiceEntity service = new ServiceEntity(); - service.name = request.getData().getName(); - service.minCores = request.getData().getMinCores(); - service.maxCores = request.getData().getMaxCores(); - service.minMemory = request.getData().getMinMemory(); - service.minGpus = request.getData().getMinGpus(); - service.maxGpus = request.getData().getMaxGpus(); - service.minGpuMemory = request.getData().getMinGpuMemory(); - service.tags = Sets.newLinkedHashSet(request.getData().getTagsList()); - service.threadable = request.getData().getThreadable(); - service.timeout = request.getData().getTimeout(); - service.timeout_llu = request.getData().getTimeoutLlu(); - service.minMemoryIncrease = request.getData().getMinMemoryIncrease(); - serviceManager.createService(service); - responseObserver.onNext(ServiceCreateServiceResponse.newBuilder() - .setService(whiteboard.getService(service.getId())).build()); - responseObserver.onCompleted(); - } - - @Override - public void getDefaultServices(ServiceGetDefaultServicesRequest request, - StreamObserver responseObserver) { - responseObserver.onNext(ServiceGetDefaultServicesResponse.newBuilder() - .setServices(whiteboard.getDefaultServices()).build()); - responseObserver.onCompleted(); - } - - @Override - public void getService(ServiceGetServiceRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(ServiceGetServiceResponse.newBuilder() - .setService(whiteboard.getService(request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + private ServiceManager serviceManager; + private Whiteboard whiteboard; + + @Override + public void createService(ServiceCreateServiceRequest request, + StreamObserver responseObserver) { + ServiceEntity service = new ServiceEntity(); + service.name = request.getData().getName(); + service.minCores = request.getData().getMinCores(); + service.maxCores = request.getData().getMaxCores(); + service.minMemory = request.getData().getMinMemory(); + service.minGpus = request.getData().getMinGpus(); + service.maxGpus = request.getData().getMaxGpus(); + service.minGpuMemory = request.getData().getMinGpuMemory(); + service.tags = Sets.newLinkedHashSet(request.getData().getTagsList()); + service.threadable = request.getData().getThreadable(); + service.timeout = request.getData().getTimeout(); + 
service.timeout_llu = request.getData().getTimeoutLlu(); + service.minMemoryIncrease = request.getData().getMinMemoryIncrease(); + serviceManager.createService(service); + responseObserver.onNext(ServiceCreateServiceResponse.newBuilder() + .setService(whiteboard.getService(service.getId())).build()); + responseObserver.onCompleted(); + } + + @Override + public void getDefaultServices(ServiceGetDefaultServicesRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(ServiceGetDefaultServicesResponse.newBuilder() + .setServices(whiteboard.getDefaultServices()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getService(ServiceGetServiceRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(ServiceGetServiceResponse.newBuilder() + .setService(whiteboard.getService(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void delete(ServiceDeleteRequest request, + StreamObserver responseObserver) { + serviceManager.deleteService(toServiceEntity(request.getService())); + responseObserver.onNext(ServiceDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void update(ServiceUpdateRequest request, + StreamObserver responseObserver) { + serviceManager.updateService(toServiceEntity(request.getService())); + responseObserver.onNext(ServiceUpdateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public ServiceManager getServiceManager() { + return serviceManager; + } + + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private ServiceEntity toServiceEntity(Service service) { + ServiceEntity entity = new ServiceEntity(); + entity.id = service.getId(); + entity.name = service.getName(); + entity.minCores = service.getMinCores(); + entity.maxCores = service.getMaxCores(); + entity.minMemory = service.getMinMemory(); + entity.minGpus = service.getMinGpus(); + entity.maxGpus = service.getMaxGpus(); + entity.minGpuMemory = service.getMinGpuMemory(); + entity.tags = new LinkedHashSet<>(service.getTagsList()); + entity.threadable = service.getThreadable(); + entity.timeout = service.getTimeout(); + entity.timeout_llu = service.getTimeoutLlu(); + entity.minMemoryIncrease = service.getMinMemoryIncrease(); + return entity; } - } - - @Override - public void delete(ServiceDeleteRequest request, - StreamObserver responseObserver) { - serviceManager.deleteService(toServiceEntity(request.getService())); - responseObserver.onNext(ServiceDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void update(ServiceUpdateRequest request, - StreamObserver responseObserver) { - serviceManager.updateService(toServiceEntity(request.getService())); - responseObserver.onNext(ServiceUpdateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public ServiceManager getServiceManager() { - return serviceManager; - } - - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void 
setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private ServiceEntity toServiceEntity(Service service) { - ServiceEntity entity = new ServiceEntity(); - entity.id = service.getId(); - entity.name = service.getName(); - entity.minCores = service.getMinCores(); - entity.maxCores = service.getMaxCores(); - entity.minMemory = service.getMinMemory(); - entity.minGpus = service.getMinGpus(); - entity.maxGpus = service.getMaxGpus(); - entity.minGpuMemory = service.getMinGpuMemory(); - entity.tags = new LinkedHashSet<>(service.getTagsList()); - entity.threadable = service.getThreadable(); - entity.timeout = service.getTimeout(); - entity.timeout_llu = service.getTimeoutLlu(); - entity.minMemoryIncrease = service.getMinMemoryIncrease(); - return entity; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java index 5769c49e5..05a02c122 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java @@ -29,54 +29,54 @@ import com.imageworks.spcue.service.ServiceManager; public class ManageServiceOverride - extends ServiceOverrideInterfaceGrpc.ServiceOverrideInterfaceImplBase { + extends ServiceOverrideInterfaceGrpc.ServiceOverrideInterfaceImplBase { - private ServiceManager serviceManager; + private ServiceManager serviceManager; - @Override - public void delete(ServiceOverrideDeleteRequest request, - StreamObserver responseObserver) { - // Passing null on showId as the interface doesn't require a showId in this - // situation - serviceManager.deleteService(toServiceOverrideEntity(request.getService(), null)); - responseObserver.onNext(ServiceOverrideDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void delete(ServiceOverrideDeleteRequest request, + StreamObserver responseObserver) { + // Passing null on showId as the interface doesn't require a showId in this + // situation + serviceManager.deleteService(toServiceOverrideEntity(request.getService(), null)); + responseObserver.onNext(ServiceOverrideDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - @Override - public void update(ServiceOverrideUpdateRequest request, - StreamObserver responseObserver) { - // Passing null on showId as the interface doesn't require a showId in this - // situation - serviceManager.updateService(toServiceOverrideEntity(request.getService(), null)); - responseObserver.onNext(ServiceOverrideUpdateResponse.newBuilder().build()); - responseObserver.onCompleted(); - } + @Override + public void update(ServiceOverrideUpdateRequest request, + StreamObserver responseObserver) { + // Passing null on showId as the interface doesn't require a showId in this + // situation + serviceManager.updateService(toServiceOverrideEntity(request.getService(), null)); + responseObserver.onNext(ServiceOverrideUpdateResponse.newBuilder().build()); + responseObserver.onCompleted(); + } - public ServiceManager getServiceManager() { - return serviceManager; - } + public ServiceManager getServiceManager() { + return serviceManager; + } - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } - private ServiceOverrideEntity toServiceOverrideEntity(Service service, 
String showId) { - ServiceOverrideEntity entity = new ServiceOverrideEntity(); - entity.id = service.getId(); - entity.name = service.getName(); - entity.minCores = service.getMinCores(); - entity.maxCores = service.getMaxCores(); - entity.minMemory = service.getMinMemory(); - entity.minGpus = service.getMinGpus(); - entity.maxGpus = service.getMaxGpus(); - entity.minGpuMemory = service.getMinGpuMemory(); - entity.tags = new LinkedHashSet<>(service.getTagsList()); - entity.threadable = service.getThreadable(); - entity.showId = showId; - entity.timeout = service.getTimeout(); - entity.timeout_llu = service.getTimeoutLlu(); - entity.minMemoryIncrease = service.getMinMemoryIncrease(); - return entity; - } + private ServiceOverrideEntity toServiceOverrideEntity(Service service, String showId) { + ServiceOverrideEntity entity = new ServiceOverrideEntity(); + entity.id = service.getId(); + entity.name = service.getName(); + entity.minCores = service.getMinCores(); + entity.maxCores = service.getMaxCores(); + entity.minMemory = service.getMinMemory(); + entity.minGpus = service.getMinGpus(); + entity.maxGpus = service.getMaxGpus(); + entity.minGpuMemory = service.getMinGpuMemory(); + entity.tags = new LinkedHashSet<>(service.getTagsList()); + entity.threadable = service.getThreadable(); + entity.showId = showId; + entity.timeout = service.getTimeout(); + entity.timeout_llu = service.getTimeoutLlu(); + entity.minMemoryIncrease = service.getMinMemoryIncrease(); + return entity; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java index d76fc8de3..e6fccad9e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java @@ -107,386 +107,391 @@ public class ManageShow extends ShowInterfaceGrpc.ShowInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; - private ShowDao showDao; - private DepartmentManager departmentManager; - private FilterManager filterManager; - private OwnerManager ownerManager; - private ServiceManager serviceManager; - private JobSearchFactory jobSearchFactory; - - @Override - public void createShow(ShowCreateShowRequest request, - StreamObserver responseObserver) { - try { - ShowEntity show = new ShowEntity(); - show.name = request.getName(); - adminManager.createShow(show); - responseObserver.onNext(ShowCreateShowResponse.newBuilder() - .setShow(whiteboard.getShow(show.getShowId())).build()); - responseObserver.onCompleted(); - } catch (Exception e) { - responseObserver - .onError(Status.INTERNAL.withDescription("Show could not be created." 
+ e.getMessage()) - .withCause(e).asRuntimeException()); - } - } - - @Override - public void findShow(ShowFindShowRequest request, - StreamObserver responseObserver) { - try { - responseObserver.onNext(ShowFindShowResponse.newBuilder() - .setShow(whiteboard.findShow(request.getName())).build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); - } - } - - @Override - public void getActiveShows(ShowGetActiveShowsRequest request, - StreamObserver responseObserver) { - responseObserver.onNext( - ShowGetActiveShowsResponse.newBuilder().setShows(whiteboard.getActiveShows()).build()); - responseObserver.onCompleted(); - } - - @Override - public void getShows(ShowGetShowsRequest request, - StreamObserver responseObserver) { - responseObserver - .onNext(ShowGetShowsResponse.newBuilder().setShows(whiteboard.getShows()).build()); - responseObserver.onCompleted(); - } - - @Override - public void getFilters(ShowGetFiltersRequest request, - StreamObserver responseObserver) { - FilterSeq filterSeq = whiteboard.getFilters(getShowEntity(request.getShow())); - ShowGetFiltersResponse response = - ShowGetFiltersResponse.newBuilder().setFilters(filterSeq).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getSubscriptions(ShowGetSubscriptionRequest request, - StreamObserver responseObserver) { - SubscriptionSeq subscriptionSeq = whiteboard.getSubscriptions(getShowEntity(request.getShow())); - ShowGetSubscriptionResponse response = - ShowGetSubscriptionResponse.newBuilder().setSubscriptions(subscriptionSeq).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getRootGroup(ShowGetRootGroupRequest request, - StreamObserver responseObserver) { - Group rootGroup = whiteboard.getRootGroup(getShowEntity(request.getShow())); - ShowGetRootGroupResponse response = - ShowGetRootGroupResponse.newBuilder().setGroup(rootGroup).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void createSubscription(ShowCreateSubscriptionRequest request, - StreamObserver responseObserver) { - AllocationEntity allocationEntity = adminManager.getAllocationDetail(request.getAllocationId()); - SubscriptionInterface s = adminManager.createSubscription(getShowEntity(request.getShow()), - allocationEntity, Convert.coresToCoreUnits(request.getSize()), - Convert.coresToCoreUnits(request.getBurst())); - Subscription subscription = whiteboard.getSubscription(s.getSubscriptionId()); - ShowCreateSubscriptionResponse response = - ShowCreateSubscriptionResponse.newBuilder().setSubscription(subscription).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getGroups(ShowGetGroupsRequest request, - StreamObserver responseObserver) { - GroupSeq groupSeq = whiteboard.getGroups(getShowEntity(request.getShow())); - ShowGetGroupsResponse response = ShowGetGroupsResponse.newBuilder().setGroups(groupSeq).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getJobWhiteboard(ShowGetJobWhiteboardRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - ShowGetJobWhiteboardResponse response = ShowGetJobWhiteboardResponse.newBuilder() - 
.setWhiteboard(whiteboard.getJobWhiteboard(show)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getJobs(ShowGetJobsRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - JobSeq jobSeq = whiteboard.getJobs(jobSearchFactory.create(show)); - ShowGetJobsResponse response = ShowGetJobsResponse.newBuilder().setJobs(jobSeq).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMaxCores(ShowSetDefaultMaxCoresRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMaxCores(show, Convert.coresToWholeCoreUnits(request.getMaxCores())); - responseObserver.onNext(ShowSetDefaultMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMinCores(ShowSetDefaultMinCoresRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMinCores(show, Convert.coresToWholeCoreUnits(request.getMinCores())); - responseObserver.onNext(ShowSetDefaultMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMaxGpus(ShowSetDefaultMaxGpusRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMaxGpus(show, request.getMaxGpus()); - responseObserver.onNext(ShowSetDefaultMaxGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setDefaultMinGpus(ShowSetDefaultMinGpusRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateShowDefaultMinGpus(show, request.getMinGpus()); - responseObserver.onNext(ShowSetDefaultMinGpusResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void findFilter(ShowFindFilterRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - ShowFindFilterResponse response = ShowFindFilterResponse.newBuilder() - .setFilter(whiteboard.findFilter(show, request.getName())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void createFilter(ShowCreateFilterRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - FilterEntity filter = new FilterEntity(); - filter.name = request.getName(); - filter.showId = show.id; - filter.type = FilterType.MATCH_ALL; - filter.order = 0; - filterManager.createFilter(filter); - ShowCreateFilterResponse response = ShowCreateFilterResponse.newBuilder() - .setFilter(whiteboard.findFilter(show, request.getName())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getDepartment(ShowGetDepartmentRequest request, - StreamObserver responseObserver) { - ShowGetDepartmentResponse response = ShowGetDepartmentResponse.newBuilder() - .setDepartment( - whiteboard.getDepartment(getShowEntity(request.getShow()), request.getDepartment())) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void getDepartments(ShowGetDepartmentsRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - 
ShowGetDepartmentsResponse response = ShowGetDepartmentsResponse.newBuilder() - .setDepartments(whiteboard.getDepartments(show)).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void enableBooking(ShowEnableBookingRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateBookingEnabled(show, request.getEnabled()); - responseObserver.onNext(ShowEnableBookingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void enableDispatching(ShowEnableDispatchingRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - showDao.updateDispatchingEnabled(show, request.getEnabled()); - responseObserver.onNext(ShowEnableDispatchingResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getDeeds(ShowGetDeedsRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - responseObserver - .onNext(ShowGetDeedsResponse.newBuilder().setDeeds(whiteboard.getDeeds(show)).build()); - responseObserver.onCompleted(); - } - - @Override - public void createOwner(ShowCreateOwnerRequest request, - StreamObserver responseObserver) { - ownerManager.createOwner(request.getName(), getShowEntity(request.getShow())); - ShowCreateOwnerResponse response = ShowCreateOwnerResponse.newBuilder() - .setOwner(whiteboard.getOwner(request.getName())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } - - @Override - public void setActive(ShowSetActiveRequest request, - StreamObserver responseObserver) { - adminManager.setShowActive(getShowEntity(request.getShow()), request.getValue()); - responseObserver.onNext(ShowSetActiveResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void createServiceOverride(ShowCreateServiceOverrideRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - Service requestService = request.getService(); - ServiceOverrideEntity service = new ServiceOverrideEntity(); - service.showId = show.getId(); - service.name = requestService.getName(); - service.minCores = requestService.getMinCores(); - service.maxCores = requestService.getMaxCores(); - service.minMemory = requestService.getMinMemory(); - service.minGpus = requestService.getMinGpus(); - service.maxGpus = requestService.getMaxGpus(); - service.minGpuMemory = requestService.getMinGpuMemory(); - service.tags = Sets.newLinkedHashSet(requestService.getTagsList()); - service.threadable = requestService.getThreadable(); - service.minMemoryIncrease = requestService.getMinMemoryIncrease(); - serviceManager.createService(service); - ServiceOverride serviceOverride = whiteboard.getServiceOverride(show, service.name); - responseObserver.onNext( - ShowCreateServiceOverrideResponse.newBuilder().setServiceOverride(serviceOverride).build()); - responseObserver.onCompleted(); - } - - @Override - public void getServiceOverrides(ShowGetServiceOverridesRequest request, - StreamObserver responseObserver) { - ShowEntity show = getShowEntity(request.getShow()); - responseObserver.onNext(ShowGetServiceOverridesResponse.newBuilder() - .setServiceOverrides(whiteboard.getServiceOverrides(show)).build()); - responseObserver.onCompleted(); - } - - @Override - public void delete(ShowDeleteRequest request, - StreamObserver responseObserver) { - 
showDao.delete(getShowEntity(request.getShow())); - responseObserver.onNext(ShowDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void getServiceOverride(ShowGetServiceOverrideRequest request, - StreamObserver responseObserver) { - ServiceOverride serviceOverride = - whiteboard.getServiceOverride(getShowEntity(request.getShow()), request.getName()); - responseObserver.onNext( - ShowGetServiceOverrideResponse.newBuilder().setServiceOverride(serviceOverride).build()); - responseObserver.onCompleted(); - } - - @Override - public void setCommentEmail(ShowSetCommentEmailRequest request, - StreamObserver responseObserver) { - adminManager.updateShowCommentEmail(getShowEntity(request.getShow()), - request.getEmail().split(",")); - responseObserver.onNext(ShowSetCommentEmailResponse.newBuilder().build()); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public OwnerManager getOwnerManager() { - return ownerManager; - } - - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } - - public ServiceManager getServiceManager() { - return serviceManager; - } - - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } - - private ShowEntity getShowEntity(Show show) { - return adminManager.getShowEntity(show.getId()); - } - - public JobSearchFactory getJobSearchFactory() { - return jobSearchFactory; - } - - public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { - this.jobSearchFactory = jobSearchFactory; - } + private AdminManager adminManager; + private Whiteboard whiteboard; + private ShowDao showDao; + private DepartmentManager departmentManager; + private FilterManager filterManager; + private OwnerManager ownerManager; + private ServiceManager serviceManager; + private JobSearchFactory jobSearchFactory; + + @Override + public void createShow(ShowCreateShowRequest request, + StreamObserver responseObserver) { + try { + ShowEntity show = new ShowEntity(); + show.name = request.getName(); + adminManager.createShow(show); + responseObserver.onNext(ShowCreateShowResponse.newBuilder() + .setShow(whiteboard.getShow(show.getShowId())).build()); + responseObserver.onCompleted(); + } catch (Exception e) { + responseObserver.onError( + Status.INTERNAL.withDescription("Show could not be created." 
+ e.getMessage()) + .withCause(e).asRuntimeException()); + } + } + + @Override + public void findShow(ShowFindShowRequest request, + StreamObserver responseObserver) { + try { + responseObserver.onNext(ShowFindShowResponse.newBuilder() + .setShow(whiteboard.findShow(request.getName())).build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void getActiveShows(ShowGetActiveShowsRequest request, + StreamObserver responseObserver) { + responseObserver.onNext(ShowGetActiveShowsResponse.newBuilder() + .setShows(whiteboard.getActiveShows()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getShows(ShowGetShowsRequest request, + StreamObserver responseObserver) { + responseObserver + .onNext(ShowGetShowsResponse.newBuilder().setShows(whiteboard.getShows()).build()); + responseObserver.onCompleted(); + } + + @Override + public void getFilters(ShowGetFiltersRequest request, + StreamObserver responseObserver) { + FilterSeq filterSeq = whiteboard.getFilters(getShowEntity(request.getShow())); + ShowGetFiltersResponse response = + ShowGetFiltersResponse.newBuilder().setFilters(filterSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getSubscriptions(ShowGetSubscriptionRequest request, + StreamObserver responseObserver) { + SubscriptionSeq subscriptionSeq = + whiteboard.getSubscriptions(getShowEntity(request.getShow())); + ShowGetSubscriptionResponse response = + ShowGetSubscriptionResponse.newBuilder().setSubscriptions(subscriptionSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getRootGroup(ShowGetRootGroupRequest request, + StreamObserver responseObserver) { + Group rootGroup = whiteboard.getRootGroup(getShowEntity(request.getShow())); + ShowGetRootGroupResponse response = + ShowGetRootGroupResponse.newBuilder().setGroup(rootGroup).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void createSubscription(ShowCreateSubscriptionRequest request, + StreamObserver responseObserver) { + AllocationEntity allocationEntity = + adminManager.getAllocationDetail(request.getAllocationId()); + SubscriptionInterface s = adminManager.createSubscription(getShowEntity(request.getShow()), + allocationEntity, Convert.coresToCoreUnits(request.getSize()), + Convert.coresToCoreUnits(request.getBurst())); + Subscription subscription = whiteboard.getSubscription(s.getSubscriptionId()); + ShowCreateSubscriptionResponse response = + ShowCreateSubscriptionResponse.newBuilder().setSubscription(subscription).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getGroups(ShowGetGroupsRequest request, + StreamObserver responseObserver) { + GroupSeq groupSeq = whiteboard.getGroups(getShowEntity(request.getShow())); + ShowGetGroupsResponse response = + ShowGetGroupsResponse.newBuilder().setGroups(groupSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getJobWhiteboard(ShowGetJobWhiteboardRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + ShowGetJobWhiteboardResponse response = ShowGetJobWhiteboardResponse.newBuilder() + 
.setWhiteboard(whiteboard.getJobWhiteboard(show)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getJobs(ShowGetJobsRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + JobSeq jobSeq = whiteboard.getJobs(jobSearchFactory.create(show)); + ShowGetJobsResponse response = ShowGetJobsResponse.newBuilder().setJobs(jobSeq).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMaxCores(ShowSetDefaultMaxCoresRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMaxCores(show, + Convert.coresToWholeCoreUnits(request.getMaxCores())); + responseObserver.onNext(ShowSetDefaultMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMinCores(ShowSetDefaultMinCoresRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMinCores(show, + Convert.coresToWholeCoreUnits(request.getMinCores())); + responseObserver.onNext(ShowSetDefaultMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMaxGpus(ShowSetDefaultMaxGpusRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMaxGpus(show, request.getMaxGpus()); + responseObserver.onNext(ShowSetDefaultMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMinGpus(ShowSetDefaultMinGpusRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMinGpus(show, request.getMinGpus()); + responseObserver.onNext(ShowSetDefaultMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void findFilter(ShowFindFilterRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + ShowFindFilterResponse response = ShowFindFilterResponse.newBuilder() + .setFilter(whiteboard.findFilter(show, request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void createFilter(ShowCreateFilterRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + FilterEntity filter = new FilterEntity(); + filter.name = request.getName(); + filter.showId = show.id; + filter.type = FilterType.MATCH_ALL; + filter.order = 0; + filterManager.createFilter(filter); + ShowCreateFilterResponse response = ShowCreateFilterResponse.newBuilder() + .setFilter(whiteboard.findFilter(show, request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getDepartment(ShowGetDepartmentRequest request, + StreamObserver responseObserver) { + ShowGetDepartmentResponse response = ShowGetDepartmentResponse + .newBuilder().setDepartment(whiteboard + .getDepartment(getShowEntity(request.getShow()), request.getDepartment())) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void getDepartments(ShowGetDepartmentsRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + 
ShowGetDepartmentsResponse response = ShowGetDepartmentsResponse.newBuilder() + .setDepartments(whiteboard.getDepartments(show)).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void enableBooking(ShowEnableBookingRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateBookingEnabled(show, request.getEnabled()); + responseObserver.onNext(ShowEnableBookingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void enableDispatching(ShowEnableDispatchingRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateDispatchingEnabled(show, request.getEnabled()); + responseObserver.onNext(ShowEnableDispatchingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getDeeds(ShowGetDeedsRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + responseObserver.onNext( + ShowGetDeedsResponse.newBuilder().setDeeds(whiteboard.getDeeds(show)).build()); + responseObserver.onCompleted(); + } + + @Override + public void createOwner(ShowCreateOwnerRequest request, + StreamObserver responseObserver) { + ownerManager.createOwner(request.getName(), getShowEntity(request.getShow())); + ShowCreateOwnerResponse response = ShowCreateOwnerResponse.newBuilder() + .setOwner(whiteboard.getOwner(request.getName())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } + + @Override + public void setActive(ShowSetActiveRequest request, + StreamObserver responseObserver) { + adminManager.setShowActive(getShowEntity(request.getShow()), request.getValue()); + responseObserver.onNext(ShowSetActiveResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void createServiceOverride(ShowCreateServiceOverrideRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + Service requestService = request.getService(); + ServiceOverrideEntity service = new ServiceOverrideEntity(); + service.showId = show.getId(); + service.name = requestService.getName(); + service.minCores = requestService.getMinCores(); + service.maxCores = requestService.getMaxCores(); + service.minMemory = requestService.getMinMemory(); + service.minGpus = requestService.getMinGpus(); + service.maxGpus = requestService.getMaxGpus(); + service.minGpuMemory = requestService.getMinGpuMemory(); + service.tags = Sets.newLinkedHashSet(requestService.getTagsList()); + service.threadable = requestService.getThreadable(); + service.minMemoryIncrease = requestService.getMinMemoryIncrease(); + serviceManager.createService(service); + ServiceOverride serviceOverride = whiteboard.getServiceOverride(show, service.name); + responseObserver.onNext(ShowCreateServiceOverrideResponse.newBuilder() + .setServiceOverride(serviceOverride).build()); + responseObserver.onCompleted(); + } + + @Override + public void getServiceOverrides(ShowGetServiceOverridesRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + responseObserver.onNext(ShowGetServiceOverridesResponse.newBuilder() + .setServiceOverrides(whiteboard.getServiceOverrides(show)).build()); + responseObserver.onCompleted(); + } + + @Override + public void delete(ShowDeleteRequest request, + StreamObserver responseObserver) { + 
showDao.delete(getShowEntity(request.getShow())); + responseObserver.onNext(ShowDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void getServiceOverride(ShowGetServiceOverrideRequest request, + StreamObserver responseObserver) { + ServiceOverride serviceOverride = + whiteboard.getServiceOverride(getShowEntity(request.getShow()), request.getName()); + responseObserver.onNext(ShowGetServiceOverrideResponse.newBuilder() + .setServiceOverride(serviceOverride).build()); + responseObserver.onCompleted(); + } + + @Override + public void setCommentEmail(ShowSetCommentEmailRequest request, + StreamObserver responseObserver) { + adminManager.updateShowCommentEmail(getShowEntity(request.getShow()), + request.getEmail().split(",")); + responseObserver.onNext(ShowSetCommentEmailResponse.newBuilder().build()); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public OwnerManager getOwnerManager() { + return ownerManager; + } + + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = ownerManager; + } + + public ServiceManager getServiceManager() { + return serviceManager; + } + + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } + + private ShowEntity getShowEntity(Show show) { + return adminManager.getShowEntity(show.getId()); + } + + public JobSearchFactory getJobSearchFactory() { + return jobSearchFactory; + } + + public void setJobSearchFactory(JobSearchFactory jobSearchFactory) { + this.jobSearchFactory = jobSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java index bf907da59..3d36e4b93 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageSubscription.java @@ -40,87 +40,88 @@ public class ManageSubscription extends SubscriptionInterfaceGrpc.SubscriptionInterfaceImplBase { - private AdminManager adminManager; - private Whiteboard whiteboard; - - @Override - public void delete(SubscriptionDeleteRequest request, - StreamObserver responseObserver) { - adminManager.deleteSubscription(getSubscriptionDetail(request.getSubscription())); - responseObserver.onNext(SubscriptionDeleteResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void find(SubscriptionFindRequest request, - StreamObserver responseObserver) throws CueGrpcException { - String name = request.getName(); - try { - String[] parts = name.split("\\.", 3); - if (parts.length != 3) { - throw new CueGrpcException("Subscription names must be in the form of 
alloc.show"); - } - SubscriptionFindResponse response = SubscriptionFindResponse.newBuilder() - .setSubscription(whiteboard.findSubscription(parts[2], parts[0] + "." + parts[1])) - .build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver - .onError(Status.NOT_FOUND.withDescription("A subscription to " + name + " was not found.") - .withCause(e).asRuntimeException()); + private AdminManager adminManager; + private Whiteboard whiteboard; + + @Override + public void delete(SubscriptionDeleteRequest request, + StreamObserver responseObserver) { + adminManager.deleteSubscription(getSubscriptionDetail(request.getSubscription())); + responseObserver.onNext(SubscriptionDeleteResponse.newBuilder().build()); + responseObserver.onCompleted(); } - } - - @Override - public void get(SubscriptionGetRequest request, - StreamObserver responseObserver) { - try { - SubscriptionGetResponse response = SubscriptionGetResponse.newBuilder() - .setSubscription(whiteboard.getSubscription(request.getId())).build(); - responseObserver.onNext(response); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { - responseObserver.onError( - Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e).asRuntimeException()); + + @Override + public void find(SubscriptionFindRequest request, + StreamObserver responseObserver) throws CueGrpcException { + String name = request.getName(); + try { + String[] parts = name.split("\\.", 3); + if (parts.length != 3) { + throw new CueGrpcException("Subscription names must be in the form of alloc.show"); + } + SubscriptionFindResponse response = SubscriptionFindResponse.newBuilder() + .setSubscription( + whiteboard.findSubscription(parts[2], parts[0] + "." 
+ parts[1])) + .build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND + .withDescription("A subscription to " + name + " was not found.").withCause(e) + .asRuntimeException()); + } + } + + @Override + public void get(SubscriptionGetRequest request, + StreamObserver responseObserver) { + try { + SubscriptionGetResponse response = SubscriptionGetResponse.newBuilder() + .setSubscription(whiteboard.getSubscription(request.getId())).build(); + responseObserver.onNext(response); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.NOT_FOUND.withDescription(e.getMessage()).withCause(e) + .asRuntimeException()); + } + } + + @Override + public void setBurst(SubscriptionSetBurstRequest request, + StreamObserver responseObserver) { + adminManager.setSubscriptionBurst(getSubscriptionDetail(request.getSubscription()), + request.getBurst()); + responseObserver.onNext(SubscriptionSetBurstResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setSize(SubscriptionSetSizeRequest request, + StreamObserver responseObserver) { + adminManager.setSubscriptionSize(getSubscriptionDetail(request.getSubscription()), + request.getNewSize()); + responseObserver.onNext(SubscriptionSetSizeResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public Whiteboard getWhiteboard() { + return whiteboard; + } + + public void setWhiteboard(Whiteboard whiteboard) { + this.whiteboard = whiteboard; + } + + private SubscriptionEntity getSubscriptionDetail(Subscription subscription) { + return adminManager.getSubscriptionDetail(subscription.getId()); } - } - - @Override - public void setBurst(SubscriptionSetBurstRequest request, - StreamObserver responseObserver) { - adminManager.setSubscriptionBurst(getSubscriptionDetail(request.getSubscription()), - request.getBurst()); - responseObserver.onNext(SubscriptionSetBurstResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - @Override - public void setSize(SubscriptionSetSizeRequest request, - StreamObserver responseObserver) { - adminManager.setSubscriptionSize(getSubscriptionDetail(request.getSubscription()), - request.getNewSize()); - responseObserver.onNext(SubscriptionSetSizeResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public Whiteboard getWhiteboard() { - return whiteboard; - } - - public void setWhiteboard(Whiteboard whiteboard) { - this.whiteboard = whiteboard; - } - - private SubscriptionEntity getSubscriptionDetail(Subscription subscription) { - return adminManager.getSubscriptionDetail(subscription.getId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java index 4c75824e6..3626294fa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageTask.java @@ -31,45 +31,45 @@ public class ManageTask extends TaskInterfaceGrpc.TaskInterfaceImplBase { - private DepartmentManager 
departmentManager;
+    private DepartmentManager departmentManager;

-    @Override
-    public void delete(TaskDeleteRequest request,
-            StreamObserver<TaskDeleteResponse> responseObserver) {
-        departmentManager.removeTask(getTaskDetail(request.getTask()));
-        TaskDeleteResponse response = TaskDeleteResponse.newBuilder().build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
+    @Override
+    public void delete(TaskDeleteRequest request,
+            StreamObserver<TaskDeleteResponse> responseObserver) {
+        departmentManager.removeTask(getTaskDetail(request.getTask()));
+        TaskDeleteResponse response = TaskDeleteResponse.newBuilder().build();
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }

-    @Override
-    public void setMinCores(TaskSetMinCoresRequest request,
-            StreamObserver<TaskSetMinCoresResponse> responseObserver) {
-        departmentManager.setMinCores(getTaskDetail(request.getTask()),
-                Convert.coresToWholeCoreUnits(request.getNewMinCores()));
-        TaskSetMinCoresResponse response = TaskSetMinCoresResponse.newBuilder().build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
+    @Override
+    public void setMinCores(TaskSetMinCoresRequest request,
+            StreamObserver<TaskSetMinCoresResponse> responseObserver) {
+        departmentManager.setMinCores(getTaskDetail(request.getTask()),
+                Convert.coresToWholeCoreUnits(request.getNewMinCores()));
+        TaskSetMinCoresResponse response = TaskSetMinCoresResponse.newBuilder().build();
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }

-    @Override
-    public void clearAdjustments(TaskClearAdjustmentsRequest request,
-            StreamObserver<TaskClearAdjustmentsResponse> responseObserver) {
-        departmentManager.clearTaskAdjustment(getTaskDetail(request.getTask()));
-        TaskClearAdjustmentsResponse response = TaskClearAdjustmentsResponse.newBuilder().build();
-        responseObserver.onNext(response);
-        responseObserver.onCompleted();
-    }
+    @Override
+    public void clearAdjustments(TaskClearAdjustmentsRequest request,
+            StreamObserver<TaskClearAdjustmentsResponse> responseObserver) {
+        departmentManager.clearTaskAdjustment(getTaskDetail(request.getTask()));
+        TaskClearAdjustmentsResponse response = TaskClearAdjustmentsResponse.newBuilder().build();
+        responseObserver.onNext(response);
+        responseObserver.onCompleted();
+    }

-    public DepartmentManager getDepartmentManager() {
-        return departmentManager;
-    }
+    public DepartmentManager getDepartmentManager() {
+        return departmentManager;
+    }

-    public void setDepartmentManager(DepartmentManager departmentManager) {
-        this.departmentManager = departmentManager;
-    }
+    public void setDepartmentManager(DepartmentManager departmentManager) {
+        this.departmentManager = departmentManager;
+    }

-    private TaskEntity getTaskDetail(Task task) {
-        return departmentManager.getTaskDetail(task.getName());
-    }
+    private TaskEntity getTaskDetail(Task task) {
+        return departmentManager.getTaskDetail(task.getName());
+    }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java b/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java
index e827cc5f6..2a30fb2cd 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/servant/RqdReportStatic.java
@@ -15,48 +15,48 @@ public class RqdReportStatic extends RqdReportInterfaceGrpc.RqdReportInterfaceImplBase {
-    private FrameCompleteHandler frameCompleteHandler;
-    private HostReportHandler hostReportHandler;
-
-    @SuppressWarnings("unused")
-
-    @Override
-    public void reportRqdStartup(RqdReportRqdStartupRequest request,
-            StreamObserver<RqdReportRqdStartupResponse> responseObserver) {
-        hostReportHandler.queueBootReport(request.getBootReport());
-        responseObserver.onNext(RqdReportRqdStartupResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
-
-    @Override
-    public void reportRunningFrameCompletion(RqdReportRunningFrameCompletionRequest request,
-            StreamObserver<RqdReportRunningFrameCompletionResponse> responseObserver) {
-        frameCompleteHandler.handleFrameCompleteReport(request.getFrameCompleteReport());
-        responseObserver.onNext(RqdReportRunningFrameCompletionResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
-
-    @Override
-    public void reportStatus(RqdReportStatusRequest request,
-            StreamObserver<RqdReportStatusResponse> responseObserver) {
-        hostReportHandler.queueHostReport(request.getHostReport());
-        responseObserver.onNext(RqdReportStatusResponse.newBuilder().build());
-        responseObserver.onCompleted();
-    }
-
-    public FrameCompleteHandler getFrameCompleteHandler() {
-        return frameCompleteHandler;
-    }
-
-    public void setFrameCompleteHandler(FrameCompleteHandler frameCompleteHandler) {
-        this.frameCompleteHandler = frameCompleteHandler;
-    }
-
-    public HostReportHandler getHostReportHandler() {
-        return hostReportHandler;
-    }
-
-    public void setHostReportHandler(HostReportHandler hostReportHandler) {
-        this.hostReportHandler = hostReportHandler;
-    }
+    private FrameCompleteHandler frameCompleteHandler;
+    private HostReportHandler hostReportHandler;
+
+    @SuppressWarnings("unused")
+
+    @Override
+    public void reportRqdStartup(RqdReportRqdStartupRequest request,
+            StreamObserver<RqdReportRqdStartupResponse> responseObserver) {
+        hostReportHandler.queueBootReport(request.getBootReport());
+        responseObserver.onNext(RqdReportRqdStartupResponse.newBuilder().build());
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public void reportRunningFrameCompletion(RqdReportRunningFrameCompletionRequest request,
+            StreamObserver<RqdReportRunningFrameCompletionResponse> responseObserver) {
+        frameCompleteHandler.handleFrameCompleteReport(request.getFrameCompleteReport());
+        responseObserver.onNext(RqdReportRunningFrameCompletionResponse.newBuilder().build());
+        responseObserver.onCompleted();
+    }
+
+    @Override
+    public void reportStatus(RqdReportStatusRequest request,
+            StreamObserver<RqdReportStatusResponse> responseObserver) {
+        hostReportHandler.queueHostReport(request.getHostReport());
+        responseObserver.onNext(RqdReportStatusResponse.newBuilder().build());
+        responseObserver.onCompleted();
+    }
+
+    public FrameCompleteHandler getFrameCompleteHandler() {
+        return frameCompleteHandler;
+    }
+
+    public void setFrameCompleteHandler(FrameCompleteHandler frameCompleteHandler) {
+        this.frameCompleteHandler = frameCompleteHandler;
+    }
+
+    public HostReportHandler getHostReportHandler() {
+        return hostReportHandler;
+    }
+
+    public void setHostReportHandler(HostReportHandler hostReportHandler) {
+        this.hostReportHandler = hostReportHandler;
+    }
 }
diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java
index 331299a38..34ca73bad 100644
--- a/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java
+++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java
@@ -33,58 +33,58 @@ public class ServantUtil {
-    public static List<LayerInterface> convertLayerFilterList(LayerSeq layers) {
-        final List<LayerInterface> result = new ArrayList<LayerInterface>();
-        for (final Layer layer : layers.getLayersList()) {
-            final String id = layer.getId();
-            result.add(new LayerInterface() {
-                String _id = id;
+    public static List<LayerInterface> convertLayerFilterList(LayerSeq layers) {
+        final List<LayerInterface> result = new ArrayList<LayerInterface>();
+        for (final Layer layer : layers.getLayersList()) {
+
final String id = layer.getId(); + result.add(new LayerInterface() { + String _id = id; - public String getLayerId() { - return _id; - } + public String getLayerId() { + return _id; + } - public String getJobId() { - throw new RuntimeException("not implemented"); - } + public String getJobId() { + throw new RuntimeException("not implemented"); + } - public String getShowId() { - throw new RuntimeException("not implemented"); - } + public String getShowId() { + throw new RuntimeException("not implemented"); + } - public String getId() { - return _id; - } + public String getId() { + return _id; + } - public String getName() { - throw new RuntimeException("not implemented"); - } + public String getName() { + throw new RuntimeException("not implemented"); + } - public String getFacilityId() { - throw new RuntimeException("not implemented"); + public String getFacilityId() { + throw new RuntimeException("not implemented"); + } + }); } - }); + return result; } - return result; - } - private static boolean isJobFinished(Environment env, String property, JobManager jobManager, - JobInterface job) { - if (env.getProperty(property, String.class) != null - && Objects.equals(env.getProperty(property, String.class), "true")) { - JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); - return jobDetail.state == JobState.FINISHED; + private static boolean isJobFinished(Environment env, String property, JobManager jobManager, + JobInterface job) { + if (env.getProperty(property, String.class) != null + && Objects.equals(env.getProperty(property, String.class), "true")) { + JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); + return jobDetail.state == JobState.FINISHED; + } + return false; } - return false; - } - public static boolean attemptChange(Environment env, String property, JobManager jobManager, - JobInterface job, StreamObserver responseObserver) { - if (ServantUtil.isJobFinished(env, property, jobManager, job)) { - responseObserver.onError(Status.FAILED_PRECONDITION - .withDescription("Finished jobs are readonly").asRuntimeException()); - return false; + public static boolean attemptChange(Environment env, String property, JobManager jobManager, + JobInterface job, StreamObserver responseObserver) { + if (ServantUtil.isJobFinished(env, property, jobManager, job)) { + responseObserver.onError(Status.FAILED_PRECONDITION + .withDescription("Finished jobs are readonly").asRuntimeException()); + return false; + } + return true; } - return true; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java index 306518c6f..54fc73f46 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java @@ -27,99 +27,99 @@ public interface AdminManager { - /* - * Shows - */ - boolean showExists(String name); + /* + * Shows + */ + boolean showExists(String name); - void createShow(ShowEntity show); + void createShow(ShowEntity show); - ShowEntity findShowEntity(String name); + ShowEntity findShowEntity(String name); - ShowEntity getShowEntity(String id); + ShowEntity getShowEntity(String id); - void setShowActive(ShowInterface show, boolean value); + void setShowActive(ShowInterface show, boolean value); - void updateShowCommentEmail(ShowInterface s, String[] emails); + void updateShowCommentEmail(ShowInterface s, String[] emails); - void updateShowsStatus(); + void updateShowsStatus(); - /* - * Facilities - 
*/ - FacilityInterface createFacility(String name); + /* + * Facilities + */ + FacilityInterface createFacility(String name); - void deleteFacility(FacilityInterface facility); + void deleteFacility(FacilityInterface facility); - void setFacilityName(FacilityInterface facility, String name); + void setFacilityName(FacilityInterface facility, String name); - FacilityInterface getFacility(String id); + FacilityInterface getFacility(String id); - FacilityInterface getDefaultFacility(); + FacilityInterface getDefaultFacility(); - /* - * Allocations - */ - void createAllocation(FacilityInterface facility, AllocationEntity alloc); + /* + * Allocations + */ + void createAllocation(FacilityInterface facility, AllocationEntity alloc); - void deleteAllocation(AllocationInterface alloc); + void deleteAllocation(AllocationInterface alloc); - void setAllocationName(AllocationInterface a, String name); + void setAllocationName(AllocationInterface a, String name); - void setAllocationTag(AllocationInterface a, String tag); + void setAllocationTag(AllocationInterface a, String tag); - AllocationEntity getDefaultAllocation(); + AllocationEntity getDefaultAllocation(); - void setDefaultAllocation(AllocationInterface a); + void setDefaultAllocation(AllocationInterface a); - AllocationEntity findAllocationDetail(String facility, String name); + AllocationEntity findAllocationDetail(String facility, String name); - AllocationEntity getAllocationDetail(String id); + AllocationEntity getAllocationDetail(String id); - void setAllocationBillable(AllocationInterface alloc, boolean value); + void setAllocationBillable(AllocationInterface alloc, boolean value); - /* - * Subscriptions - */ - SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, int size, - int burst); + /* + * Subscriptions + */ + SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, + int size, int burst); - SubscriptionInterface createSubscription(SubscriptionEntity sub); + SubscriptionInterface createSubscription(SubscriptionEntity sub); - void deleteSubscription(SubscriptionInterface sub); + void deleteSubscription(SubscriptionInterface sub); - void setSubscriptionBurst(SubscriptionInterface sub, int burst); + void setSubscriptionBurst(SubscriptionInterface sub, int burst); - void setSubscriptionSize(SubscriptionInterface sub, int size); + void setSubscriptionSize(SubscriptionInterface sub, int size); - SubscriptionEntity getSubscriptionDetail(String id); + SubscriptionEntity getSubscriptionDetail(String id); - /* - * Departments - */ - DepartmentInterface findDepartment(String name); + /* + * Departments + */ + DepartmentInterface findDepartment(String name); - DepartmentInterface getDefaultDepartment(); + DepartmentInterface getDefaultDepartment(); - DepartmentInterface getDepartment(DepartmentInterface d); + DepartmentInterface getDepartment(DepartmentInterface d); - DepartmentInterface createDepartment(String name); + DepartmentInterface createDepartment(String name); - void removeDepartment(DepartmentInterface d); + void removeDepartment(DepartmentInterface d); - /* - * Limits - */ - String createLimit(String name, int maxValue); + /* + * Limits + */ + String createLimit(String name, int maxValue); - void deleteLimit(LimitInterface limit); + void deleteLimit(LimitInterface limit); - LimitInterface findLimit(String name); + LimitInterface findLimit(String name); - LimitInterface getLimit(String id); + LimitInterface getLimit(String id); - void setLimitName(LimitInterface 
limit, String name); + void setLimitName(LimitInterface limit, String name); - void setLimitMaxValue(LimitInterface limit, int maxValue); + void setLimitMaxValue(LimitInterface limit, int maxValue); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java index 30d67f9af..dfc997fb4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java @@ -42,292 +42,292 @@ @Transactional public class AdminManagerService implements AdminManager { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(AdminManagerService.class); + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(AdminManagerService.class); - private ShowDao showDao; - - private AllocationDao allocationDao; - - private SubscriptionDao subscriptionDao; - - private DepartmentDao departmentDao; - - private FacilityDao facilityDao; - - private GroupManager groupManager; - - private LimitDao limitDao; - - public void setShowActive(ShowInterface show, boolean value) { - showDao.updateActive(show, value); - } - - public boolean showExists(String name) { - return showDao.showExists(name); - } - - public void createShow(ShowEntity show) { - - show.name = JobSpec.conformShowName(show.name); - - DepartmentInterface dept = getDefaultDepartment(); - showDao.insertShow(show); - - /* - * This is for the show's default group - */ - GroupDetail newGroup = new GroupDetail(); - newGroup.name = show.getName(); - newGroup.parentId = null; - newGroup.showId = show.getShowId(); - newGroup.deptId = dept.getId(); - groupManager.createGroup(newGroup, null); - } - - @Override - public void createAllocation(FacilityInterface facility, AllocationEntity alloc) { - allocationDao.insertAllocation(facility, alloc); - } - - public void deleteAllocation(AllocationInterface alloc) { - allocationDao.deleteAllocation(alloc); - } - - public void setAllocationName(AllocationInterface a, String name) { - allocationDao.updateAllocationName(a, name); - } - - @Transactional(propagation = Propagation.NEVER) - public void setAllocationTag(AllocationInterface a, String tag) { - allocationDao.updateAllocationTag(a, tag); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public AllocationEntity getDefaultAllocation() { - return allocationDao.getDefaultAllocationEntity(); - } - - @Override - public void setDefaultAllocation(AllocationInterface a) { - allocationDao.setDefaultAllocation(a); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ShowEntity findShowEntity(String name) { - return showDao.findShowDetail(name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ShowEntity getShowEntity(String id) { - return showDao.getShowDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateShowCommentEmail(ShowInterface s, String[] emails) { - showDao.updateShowCommentEmail(s, emails); - } - - @Override - public void updateShowsStatus() { - showDao.updateShowsStatus(); - } - - public SubscriptionInterface createSubscription(SubscriptionEntity sub) { - subscriptionDao.insertSubscription(sub); - return sub; - } - - public SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, - int size, int burst) 
{ - SubscriptionEntity s = new SubscriptionEntity(); - s.size = size; - s.burst = burst; - s.showId = show.getShowId(); - s.allocationId = alloc.getAllocationId(); - subscriptionDao.insertSubscription(s); - return s; - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public AllocationEntity findAllocationDetail(String facility, String name) { - return allocationDao.findAllocationEntity(facility, name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public AllocationEntity getAllocationDetail(String id) { - return allocationDao.getAllocationEntity(id); - } - - public void deleteSubscription(SubscriptionInterface sub) { - subscriptionDao.deleteSubscription(sub); - } - - public void setSubscriptionBurst(SubscriptionInterface sub, int burst) { - subscriptionDao.updateSubscriptionBurst(sub, burst); - } - - public void setSubscriptionSize(SubscriptionInterface sub, int size) { - subscriptionDao.updateSubscriptionSize(sub, size); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public SubscriptionEntity getSubscriptionDetail(String id) { - return subscriptionDao.getSubscriptionDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DepartmentInterface findDepartment(String name) { - return departmentDao.findDepartment(name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DepartmentInterface getDefaultDepartment() { - return departmentDao.getDefaultDepartment(); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DepartmentInterface getDepartment(DepartmentInterface d) { - return departmentDao.getDepartment(d.getDepartmentId()); - } - - @Override - public DepartmentInterface createDepartment(String name) { - departmentDao.insertDepartment(name); - return findDepartment(name); - } - - @Override - public void removeDepartment(DepartmentInterface d) { - departmentDao.deleteDepartment(d); - } - - @Override - public FacilityInterface createFacility(String name) { - FacilityEntity facility = new FacilityEntity(); - facility.name = name; - return facilityDao.insertFacility(facility); - } - - @Override - public void deleteFacility(FacilityInterface facility) { - facilityDao.deleteFacility(facility); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FacilityInterface getFacility(String id) { - return facilityDao.getFacility(id); - } - - @Override - public void setFacilityName(FacilityInterface facility, String name) { - facilityDao.updateFacilityName(facility, name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FacilityInterface getDefaultFacility() { - return facilityDao.getDefaultFacility(); - } - - @Override - public void setAllocationBillable(AllocationInterface alloc, boolean value) { - allocationDao.updateAllocationBillable(alloc, value); - } - - @Override - public String createLimit(String name, int maxValue) { - return limitDao.createLimit(name, maxValue); - } - - public void deleteLimit(LimitInterface limit) { - limitDao.deleteLimit(limit); - } - - @Override - public LimitInterface findLimit(String name) { - return limitDao.findLimit(name); - } - - @Override - public LimitInterface getLimit(String id) { - return limitDao.getLimit(id); - } - - @Override - public void setLimitName(LimitInterface limit, String name) { - 
limitDao.setLimitName(limit, name); - } - - @Override - public void setLimitMaxValue(LimitInterface limit, int maxValue) { - limitDao.setMaxValue(limit, maxValue); - } - - public AllocationDao getAllocationDao() { - return allocationDao; - } - - public void setAllocationDao(AllocationDao allocationDao) { - this.allocationDao = allocationDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public SubscriptionDao getSubscriptionDao() { - return subscriptionDao; - } - - public void setSubscriptionDao(SubscriptionDao subscriptionDao) { - this.subscriptionDao = subscriptionDao; - } - - public DepartmentDao getDepartmentDao() { - return departmentDao; - } - - public void setDepartmentDao(DepartmentDao departmentDao) { - this.departmentDao = departmentDao; - } - - public GroupManager getGroupManager() { - return groupManager; - } - - public void setGroupManager(GroupManager groupManager) { - this.groupManager = groupManager; - } - - public FacilityDao getFacilityDao() { - return facilityDao; - } - - public void setFacilityDao(FacilityDao facilityDao) { - this.facilityDao = facilityDao; - } - - public LimitDao getLimitDao() { - return limitDao; - } - - public void setLimitDao(LimitDao limitDao) { - this.limitDao = limitDao; - } + private ShowDao showDao; + + private AllocationDao allocationDao; + + private SubscriptionDao subscriptionDao; + + private DepartmentDao departmentDao; + + private FacilityDao facilityDao; + + private GroupManager groupManager; + + private LimitDao limitDao; + + public void setShowActive(ShowInterface show, boolean value) { + showDao.updateActive(show, value); + } + + public boolean showExists(String name) { + return showDao.showExists(name); + } + + public void createShow(ShowEntity show) { + + show.name = JobSpec.conformShowName(show.name); + + DepartmentInterface dept = getDefaultDepartment(); + showDao.insertShow(show); + + /* + * This is for the show's default group + */ + GroupDetail newGroup = new GroupDetail(); + newGroup.name = show.getName(); + newGroup.parentId = null; + newGroup.showId = show.getShowId(); + newGroup.deptId = dept.getId(); + groupManager.createGroup(newGroup, null); + } + + @Override + public void createAllocation(FacilityInterface facility, AllocationEntity alloc) { + allocationDao.insertAllocation(facility, alloc); + } + + public void deleteAllocation(AllocationInterface alloc) { + allocationDao.deleteAllocation(alloc); + } + + public void setAllocationName(AllocationInterface a, String name) { + allocationDao.updateAllocationName(a, name); + } + + @Transactional(propagation = Propagation.NEVER) + public void setAllocationTag(AllocationInterface a, String tag) { + allocationDao.updateAllocationTag(a, tag); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public AllocationEntity getDefaultAllocation() { + return allocationDao.getDefaultAllocationEntity(); + } + + @Override + public void setDefaultAllocation(AllocationInterface a) { + allocationDao.setDefaultAllocation(a); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ShowEntity findShowEntity(String name) { + return showDao.findShowDetail(name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ShowEntity getShowEntity(String id) { + return showDao.getShowDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void 
updateShowCommentEmail(ShowInterface s, String[] emails) { + showDao.updateShowCommentEmail(s, emails); + } + + @Override + public void updateShowsStatus() { + showDao.updateShowsStatus(); + } + + public SubscriptionInterface createSubscription(SubscriptionEntity sub) { + subscriptionDao.insertSubscription(sub); + return sub; + } + + public SubscriptionInterface createSubscription(ShowInterface show, AllocationInterface alloc, + int size, int burst) { + SubscriptionEntity s = new SubscriptionEntity(); + s.size = size; + s.burst = burst; + s.showId = show.getShowId(); + s.allocationId = alloc.getAllocationId(); + subscriptionDao.insertSubscription(s); + return s; + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public AllocationEntity findAllocationDetail(String facility, String name) { + return allocationDao.findAllocationEntity(facility, name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public AllocationEntity getAllocationDetail(String id) { + return allocationDao.getAllocationEntity(id); + } + + public void deleteSubscription(SubscriptionInterface sub) { + subscriptionDao.deleteSubscription(sub); + } + + public void setSubscriptionBurst(SubscriptionInterface sub, int burst) { + subscriptionDao.updateSubscriptionBurst(sub, burst); + } + + public void setSubscriptionSize(SubscriptionInterface sub, int size) { + subscriptionDao.updateSubscriptionSize(sub, size); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public SubscriptionEntity getSubscriptionDetail(String id) { + return subscriptionDao.getSubscriptionDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DepartmentInterface findDepartment(String name) { + return departmentDao.findDepartment(name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DepartmentInterface getDefaultDepartment() { + return departmentDao.getDefaultDepartment(); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DepartmentInterface getDepartment(DepartmentInterface d) { + return departmentDao.getDepartment(d.getDepartmentId()); + } + + @Override + public DepartmentInterface createDepartment(String name) { + departmentDao.insertDepartment(name); + return findDepartment(name); + } + + @Override + public void removeDepartment(DepartmentInterface d) { + departmentDao.deleteDepartment(d); + } + + @Override + public FacilityInterface createFacility(String name) { + FacilityEntity facility = new FacilityEntity(); + facility.name = name; + return facilityDao.insertFacility(facility); + } + + @Override + public void deleteFacility(FacilityInterface facility) { + facilityDao.deleteFacility(facility); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FacilityInterface getFacility(String id) { + return facilityDao.getFacility(id); + } + + @Override + public void setFacilityName(FacilityInterface facility, String name) { + facilityDao.updateFacilityName(facility, name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FacilityInterface getDefaultFacility() { + return facilityDao.getDefaultFacility(); + } + + @Override + public void setAllocationBillable(AllocationInterface alloc, boolean value) { + allocationDao.updateAllocationBillable(alloc, value); + } + + @Override + public String createLimit(String 
name, int maxValue) { + return limitDao.createLimit(name, maxValue); + } + + public void deleteLimit(LimitInterface limit) { + limitDao.deleteLimit(limit); + } + + @Override + public LimitInterface findLimit(String name) { + return limitDao.findLimit(name); + } + + @Override + public LimitInterface getLimit(String id) { + return limitDao.getLimit(id); + } + + @Override + public void setLimitName(LimitInterface limit, String name) { + limitDao.setLimitName(limit, name); + } + + @Override + public void setLimitMaxValue(LimitInterface limit, int maxValue) { + limitDao.setMaxValue(limit, maxValue); + } + + public AllocationDao getAllocationDao() { + return allocationDao; + } + + public void setAllocationDao(AllocationDao allocationDao) { + this.allocationDao = allocationDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public SubscriptionDao getSubscriptionDao() { + return subscriptionDao; + } + + public void setSubscriptionDao(SubscriptionDao subscriptionDao) { + this.subscriptionDao = subscriptionDao; + } + + public DepartmentDao getDepartmentDao() { + return departmentDao; + } + + public void setDepartmentDao(DepartmentDao departmentDao) { + this.departmentDao = departmentDao; + } + + public GroupManager getGroupManager() { + return groupManager; + } + + public void setGroupManager(GroupManager groupManager) { + this.groupManager = groupManager; + } + + public FacilityDao getFacilityDao() { + return facilityDao; + } + + public void setFacilityDao(FacilityDao facilityDao) { + this.facilityDao = facilityDao; + } + + public LimitDao getLimitDao() { + return limitDao; + } + + public void setLimitDao(LimitDao limitDao) { + this.limitDao = limitDao; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java index aff143fce..b583e19e7 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java @@ -26,115 +26,115 @@ public interface BookingManager { - /** - * Return an active LocalHostAssignment for the given host. - * - * @param host - * @return - */ - public List getLocalHostAssignment(HostInterface host); - - /** - * Return an active LocalHostAssignment for the given unique ID. - * - * @param id - * @return - */ - public LocalHostAssignment getLocalHostAssignment(String id); - - /** - * Return an active LocalHostAssignment for the given job ID and host ID. - * - * @param hostId - * @param jobId - * @return - */ - public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId); - - /** - * Create a local host assignment for the given job. - * - * @param host - * @param job - * @param lja - */ - public void createLocalHostAssignment(DispatchHost host, JobInterface job, - LocalHostAssignment lja); - - /** - * Create a local host assignment for the given layer. - * - * @param host - * @param layer - * @param lja - */ - public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, - LocalHostAssignment lja); - - /** - * Create a local host assignment for the given frame. - * - * @param host - * @param frame - * @param lja - */ - public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, - LocalHostAssignment lja); - - /** - * Return true if the host as a local assignment. 
- * - * @param host - * @return - */ - public boolean hasLocalHostAssignment(HostInterface host); - - /** - * Return true if the given host has active local frames. - * - * @param host - * @return - */ - public boolean hasActiveLocalFrames(HostInterface host); - - /** - * Remove the given LocalHostAssignment. - * - * @param lha - */ - void removeLocalHostAssignment(LocalHostAssignment lha); - - /** - * Deactivate the the given LocalHostAssignment. Deactivated entries will not book procs. - * - * @param lha - */ - void deactivateLocalHostAssignment(LocalHostAssignment lha); - - /** - * Set the max resource usage for the given LocalHostAssignment. - * - * @param l - * @param maxCoreUnits - * @param maxMemory - * @param maxGpuUnits - * @param maxGpuMemory - */ - void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, int maxGpuUnits, - long maxGpuMemory); - - /** - * Remove a LocalHostAssignment if there are no procs assigned to it. - * - * @param lha - */ - void removeInactiveLocalHostAssignment(LocalHostAssignment lha); - - /** - * Return true if the host is running more cores than the maximum allowed. - * - * @param host - * @return - */ - boolean hasResourceDeficit(HostInterface host); + /** + * Return an active LocalHostAssignment for the given host. + * + * @param host + * @return + */ + public List getLocalHostAssignment(HostInterface host); + + /** + * Return an active LocalHostAssignment for the given unique ID. + * + * @param id + * @return + */ + public LocalHostAssignment getLocalHostAssignment(String id); + + /** + * Return an active LocalHostAssignment for the given job ID and host ID. + * + * @param hostId + * @param jobId + * @return + */ + public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId); + + /** + * Create a local host assignment for the given job. + * + * @param host + * @param job + * @param lja + */ + public void createLocalHostAssignment(DispatchHost host, JobInterface job, + LocalHostAssignment lja); + + /** + * Create a local host assignment for the given layer. + * + * @param host + * @param layer + * @param lja + */ + public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, + LocalHostAssignment lja); + + /** + * Create a local host assignment for the given frame. + * + * @param host + * @param frame + * @param lja + */ + public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, + LocalHostAssignment lja); + + /** + * Return true if the host has a local assignment. + * + * @param host + * @return + */ + public boolean hasLocalHostAssignment(HostInterface host); + + /** + * Return true if the given host has active local frames. + * + * @param host + * @return + */ + public boolean hasActiveLocalFrames(HostInterface host); + + /** + * Remove the given LocalHostAssignment. + * + * @param lha + */ + void removeLocalHostAssignment(LocalHostAssignment lha); + + /** + * Deactivate the given LocalHostAssignment. Deactivated entries will not book procs. + * + * @param lha + */ + void deactivateLocalHostAssignment(LocalHostAssignment lha); + + /** + * Set the max resource usage for the given LocalHostAssignment. + * + * @param l + * @param maxCoreUnits + * @param maxMemory + * @param maxGpuUnits + * @param maxGpuMemory + */ + void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, int maxGpuUnits, + long maxGpuMemory); + + /** + * Remove a LocalHostAssignment if there are no procs assigned to it.
+ * + * @param lha + */ + void removeInactiveLocalHostAssignment(LocalHostAssignment lha); + + /** + * Return true if the host is running more cores than the maximum allowed. + * + * @param host + * @return + */ + boolean hasResourceDeficit(HostInterface host); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java index d3d1848e2..405e4e5b2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java @@ -43,199 +43,199 @@ @Transactional public class BookingManagerService implements BookingManager { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(BookingManagerService.class); + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(BookingManagerService.class); + + private BookingQueue bookingQueue; + private BookingDao bookingDao; + private Dispatcher localDispatcher; + private JobManager jobManager; + private JobManagerSupport jobManagerSupport; + private JobDao jobDao; + private HostDao hostDao; + private ProcDao procDao; + + @Override + public boolean hasLocalHostAssignment(HostInterface host) { + return bookingDao.hasLocalJob(host); + } + + @Override + public boolean hasActiveLocalFrames(HostInterface host) { + return bookingDao.hasActiveLocalJob(host); + } - private BookingQueue bookingQueue; - private BookingDao bookingDao; - private Dispatcher localDispatcher; - private JobManager jobManager; - private JobManagerSupport jobManagerSupport; - private JobDao jobDao; - private HostDao hostDao; - private ProcDao procDao; + @Override + public void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, + int maxGpuUnits, long maxGpuMemory) { - @Override - public boolean hasLocalHostAssignment(HostInterface host) { - return bookingDao.hasLocalJob(host); - } + HostInterface host = hostDao.getHost(l.getHostId()); - @Override - public boolean hasActiveLocalFrames(HostInterface host) { - return bookingDao.hasActiveLocalJob(host); - } + if (maxCoreUnits > 0) { + bookingDao.updateMaxCores(l, maxCoreUnits); + } - @Override - public void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, - int maxGpuUnits, long maxGpuMemory) { + if (maxMemory > 0) { + bookingDao.updateMaxMemory(l, maxMemory); + } - HostInterface host = hostDao.getHost(l.getHostId()); + if (maxGpuUnits > 0) { + bookingDao.updateMaxGpus(l, maxGpuUnits); + } - if (maxCoreUnits > 0) { - bookingDao.updateMaxCores(l, maxCoreUnits); + if (maxGpuMemory > 0) { + bookingDao.updateMaxGpuMemory(l, maxGpuMemory); + } } - if (maxMemory > 0) { - bookingDao.updateMaxMemory(l, maxMemory); + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void removeInactiveLocalHostAssignment(LocalHostAssignment lha) { + String jobId = lha.getJobId(); + try { + JobDetail jobDetail = jobDao.getJobDetail(jobId); + if (jobManager.isJobComplete(jobDetail) || jobDetail.state.equals(JobState.FINISHED)) { + removeLocalHostAssignment(lha); + } + } catch (EmptyResultDataAccessException e) { + removeLocalHostAssignment(lha); + } + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void removeLocalHostAssignment(LocalHostAssignment l) { + + LocalHostAssignment lja = bookingDao.getLocalJobAssignment(l.id); + HostInterface host = hostDao.getHost(l.getHostId()); + + 
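A hypothetical caller of the BookingManager interface reformatted above, limited to the methods it declares. The element type of getLocalHostAssignment(host) is assumed to be List<LocalHostAssignment> (the generic parameter is elided in the diff text), and the class name and wiring are illustrative only.

    import java.util.List;

    import com.imageworks.spcue.HostInterface;
    import com.imageworks.spcue.LocalHostAssignment;
    import com.imageworks.spcue.service.BookingManager;

    public class LocalBookingCleanupSketch {
        private final BookingManager bookingManager;

        public LocalBookingCleanupSketch(BookingManager bookingManager) {
            this.bookingManager = bookingManager;
        }

        /** Deactivate every local assignment on a host once no local frames are running. */
        public void releaseIdleAssignments(HostInterface host) {
            if (!bookingManager.hasLocalHostAssignment(host)
                    || bookingManager.hasActiveLocalFrames(host)) {
                return; // nothing booked locally, or the host is still rendering locally
            }
            List<LocalHostAssignment> assignments = bookingManager.getLocalHostAssignment(host);
            for (LocalHostAssignment lha : assignments) {
                // Deactivated entries stop booking procs; BookingManagerService's
                // deactivateLocalHostAssignment() also unbooks any remaining procs.
                bookingManager.deactivateLocalHostAssignment(lha);
            }
        }
    }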
bookingDao.deleteLocalJobAssignment(lja); } - if (maxGpuUnits > 0) { - bookingDao.updateMaxGpus(l, maxGpuUnits); + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void deactivateLocalHostAssignment(LocalHostAssignment l) { + + /* + * De-activate the local booking and unbook procs. The last proc to report in should remove + * the LHA. + */ + bookingDao.deactivate(l); + + List procs = procDao.findVirtualProcs(l); + for (VirtualProc p : procs) { + jobManagerSupport.unbookProc(p, true, new Source("user cleared local jobs")); + } + removeLocalHostAssignment(l); } - if (maxGpuMemory > 0) { - bookingDao.updateMaxGpuMemory(l, maxGpuMemory); + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getLocalHostAssignment(HostInterface host) { + return bookingDao.getLocalJobAssignment(host); } - } - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void removeInactiveLocalHostAssignment(LocalHostAssignment lha) { - String jobId = lha.getJobId(); - try { - JobDetail jobDetail = jobDao.getJobDetail(jobId); - if (jobManager.isJobComplete(jobDetail) || jobDetail.state.equals(JobState.FINISHED)) { - removeLocalHostAssignment(lha); - } - } catch (EmptyResultDataAccessException e) { - removeLocalHostAssignment(lha); + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LocalHostAssignment getLocalHostAssignment(String id) { + return bookingDao.getLocalJobAssignment(id); } - } - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void removeLocalHostAssignment(LocalHostAssignment l) { + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId) { + return bookingDao.getLocalJobAssignment(hostId, jobId); + } - LocalHostAssignment lja = bookingDao.getLocalJobAssignment(l.id); - HostInterface host = hostDao.getHost(l.getHostId()); + /** + * Create LocalHostAssignments + */ - bookingDao.deleteLocalJobAssignment(lja); - } + @Override + public void createLocalHostAssignment(DispatchHost host, JobInterface job, + LocalHostAssignment lja) { + bookingDao.insertLocalHostAssignment(host, job, lja); + } - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void deactivateLocalHostAssignment(LocalHostAssignment l) { + @Override + public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, + LocalHostAssignment lja) { + bookingDao.insertLocalHostAssignment(host, layer, lja); + } - /* - * De-activate the local booking and unbook procs. The last proc to report in should remove the - * LHA. 
- */ - bookingDao.deactivate(l); - - List procs = procDao.findVirtualProcs(l); - for (VirtualProc p : procs) { - jobManagerSupport.unbookProc(p, true, new Source("user cleared local jobs")); - } - removeLocalHostAssignment(l); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getLocalHostAssignment(HostInterface host) { - return bookingDao.getLocalJobAssignment(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public LocalHostAssignment getLocalHostAssignment(String id) { - return bookingDao.getLocalJobAssignment(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public LocalHostAssignment getLocalHostAssignment(String hostId, String jobId) { - return bookingDao.getLocalJobAssignment(hostId, jobId); - } - - /** - * Create LocalHostAssignments - */ - - @Override - public void createLocalHostAssignment(DispatchHost host, JobInterface job, - LocalHostAssignment lja) { - bookingDao.insertLocalHostAssignment(host, job, lja); - } - - @Override - public void createLocalHostAssignment(DispatchHost host, LayerInterface layer, - LocalHostAssignment lja) { - bookingDao.insertLocalHostAssignment(host, layer, lja); - } - - @Override - public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, - LocalHostAssignment lja) { - bookingDao.insertLocalHostAssignment(host, frame, lja); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean hasResourceDeficit(HostInterface host) { - return bookingDao.hasResourceDeficit(host); - } - - public BookingQueue getBookingQueue() { - return bookingQueue; - } - - public void setBookingQueue(BookingQueue bookingQueue) { - this.bookingQueue = bookingQueue; - } - - public BookingDao getBookingDao() { - return bookingDao; - } - - public void setBookingDao(BookingDao bookingDao) { - this.bookingDao = bookingDao; - } - - public Dispatcher getLocalDispatcher() { - return localDispatcher; - } - - public void setLocalDispatcher(Dispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } - - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + @Override + public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, + LocalHostAssignment lja) { + bookingDao.insertLocalHostAssignment(host, frame, lja); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean hasResourceDeficit(HostInterface host) { + return bookingDao.hasResourceDeficit(host); + } + + public BookingQueue getBookingQueue() { + return bookingQueue; + } + + public void setBookingQueue(BookingQueue bookingQueue) { + this.bookingQueue = bookingQueue; + } + + public BookingDao getBookingDao() { + return bookingDao; + } + + public void 
setBookingDao(BookingDao bookingDao) { + this.bookingDao = bookingDao; + } + + public Dispatcher getLocalDispatcher() { + return localDispatcher; + } + + public void setLocalDispatcher(Dispatcher localDispatcher) { + this.localDispatcher = localDispatcher; + } + + public JobManagerSupport getJobManagerSupport() { + return jobManagerSupport; + } + + public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { + this.jobManagerSupport = jobManagerSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java index 1e185de9a..3e9945cb1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java @@ -23,68 +23,69 @@ public interface CommentManager { - /** - * Add a comment to a job. - * - * @param job - * @param comment - */ - public void addComment(JobInterface job, CommentDetail comment); + /** + * Add a comment to a job. + * + * @param job + * @param comment + */ + public void addComment(JobInterface job, CommentDetail comment); - /** - * Add a comment to a host - * - * @param host - * @param comment - */ - public void addComment(HostInterface host, CommentDetail comment); + /** + * Add a comment to a host + * + * @param host + * @param comment + */ + public void addComment(HostInterface host, CommentDetail comment); - /** - * - * @param id - */ - public void deleteComment(String id); + /** + * + * @param id + */ + public void deleteComment(String id); - /** - * Deletes comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return boolean: returns true if one or more comments where deleted - */ - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); + /** + * Deletes comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return boolean: returns true if one or more comments where deleted + */ + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, + String subject); - /** - * Get comments using host, user, and subject - * - * @param host - * @param user - * @param subject - * @return List - */ - public List getCommentsByHostUserAndSubject(HostInterface host, String user, - String subject); + /** + * Get comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return List + */ + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject); - /** - * - * @param id - * @param message - */ - public void setCommentMessage(String id, String message); + /** + * + * @param id + * @param message + */ + public void setCommentMessage(String id, String message); - /** - * - * @param id - * @param subject - */ - public void setCommentSubject(String id, String subject); + /** + * + * @param id + * @param subject + */ + public 
void setCommentSubject(String id, String subject); - /** - * Save the specified comment - * - * @param detail - */ - public void saveComment(CommentDetail detail); + /** + * Save the specified comment + * + * @param detail + */ + public void saveComment(CommentDetail detail); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java index 9cbd2ba6e..b03492332 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java @@ -29,78 +29,78 @@ @Transactional public class CommentManagerService implements CommentManager { - private EmailSupport emailSupport; - private AdminManager adminManager; + private EmailSupport emailSupport; + private AdminManager adminManager; + + CommentDao commentDao; + + @Transactional(propagation = Propagation.SUPPORTS) + public void addComment(JobInterface job, CommentDetail comment) { + commentDao.insertComment(job, comment); + ShowEntity show = adminManager.getShowEntity(job.getShowId()); + if (show.commentMail.length > 0) { + emailSupport.reportJobComment(job, comment, show.commentMail); + } + } + + @Transactional(propagation = Propagation.REQUIRED) + public void addComment(HostInterface host, CommentDetail comment) { + commentDao.insertComment(host, comment); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void deleteComment(String id) { + commentDao.deleteComment(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, + String subject) { + return commentDao.deleteCommentByHostUserAndSubject(host, user, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public List getCommentsByHostUserAndSubject(HostInterface host, String user, + String subject) { + return commentDao.getCommentsByHostUserAndSubject(host, user, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void setCommentSubject(String id, String subject) { + commentDao.updateCommentSubject(id, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void setCommentMessage(String id, String message) { + commentDao.updateCommentMessage(id, message); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void saveComment(CommentDetail detail) { + commentDao.updateComment(detail); + } + + public CommentDao getCommentDao() { + return commentDao; + } + + public void setCommentDao(CommentDao commentDao) { + this.commentDao = commentDao; + } + + public EmailSupport getEmailSupport() { + return emailSupport; + } - CommentDao commentDao; + public void setEmailSupport(EmailSupport emailSupport) { + this.emailSupport = emailSupport; + } + + public AdminManager getAdminManager() { + return adminManager; + } - @Transactional(propagation = Propagation.SUPPORTS) - public void addComment(JobInterface job, CommentDetail comment) { - commentDao.insertComment(job, comment); - ShowEntity show = adminManager.getShowEntity(job.getShowId()); - if (show.commentMail.length > 0) { - emailSupport.reportJobComment(job, comment, show.commentMail); + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; } - } - - @Transactional(propagation = Propagation.REQUIRED) - public void addComment(HostInterface host, CommentDetail comment) { - commentDao.insertComment(host, comment); - } - - 
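A small illustrative sketch of adding a job comment through CommentManager, as implemented by CommentManagerService above; when the show has a comment email list configured, the service also mails it. The public user/subject/message fields on CommentDetail are assumed from its usage elsewhere in cuebot and are not shown in this patch.

    import com.imageworks.spcue.CommentDetail;
    import com.imageworks.spcue.JobInterface;
    import com.imageworks.spcue.service.CommentManager;

    public class JobCommentSketch {
        private final CommentManager commentManager;

        public JobCommentSketch(CommentManager commentManager) {
            this.commentManager = commentManager;
        }

        /** Attach a note to a job; the show's comment email list is notified if configured. */
        public void flagJob(JobInterface job, String user) {
            CommentDetail comment = new CommentDetail();
            comment.user = user;               // assumed public fields on CommentDetail
            comment.subject = "render review";
            comment.message = "Please hold this job for review before unpausing.";
            commentManager.addComment(job, comment);
        }
    }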
@Transactional(propagation = Propagation.REQUIRED) - public void deleteComment(String id) { - commentDao.deleteComment(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, - String subject) { - return commentDao.deleteCommentByHostUserAndSubject(host, user, subject); - } - - @Transactional(propagation = Propagation.REQUIRED) - public List getCommentsByHostUserAndSubject(HostInterface host, String user, - String subject) { - return commentDao.getCommentsByHostUserAndSubject(host, user, subject); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void setCommentSubject(String id, String subject) { - commentDao.updateCommentSubject(id, subject); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void setCommentMessage(String id, String message) { - commentDao.updateCommentMessage(id, message); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void saveComment(CommentDetail detail) { - commentDao.updateComment(detail); - } - - public CommentDao getCommentDao() { - return commentDao; - } - - public void setCommentDao(CommentDao commentDao) { - this.commentDao = commentDao; - } - - public EmailSupport getEmailSupport() { - return emailSupport; - } - - public void setEmailSupport(EmailSupport emailSupport) { - this.emailSupport = emailSupport; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java index fccb204af..761760d8e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManager.java @@ -27,177 +27,177 @@ public interface DepartmentManager { - /** - * Creates a new render point configurtion. A render point configuration maps an OpenCue - * department to a Track-It task and priorizes shots based frame and final date info. - * - * @param renderPoint - */ - public void createDepartmentConfig(PointDetail renderPoint); - - /** - * Creates a new render point configurtion. A render point configuration maps an OpenCue - * department to a Track-It task and priorizes shots based frame and final date info. - * - * @param renderPoint - */ - public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept); - - /** - * Returns true if a render point configuration already exists for the specified show and - * department. - * - * @param show - * @param dept - * @return - */ - public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept); - - /** - * Creates a new task. A task is for setting minimum procs by shot and department - * - * @param t - */ - public void createTask(TaskEntity t); - - /** - * Removes the specified task - * - * @param t - */ - public void removeTask(TaskInterface t); - - /** - * Returns task details - * - * @param id - * @return - */ - public TaskEntity getTaskDetail(String id); - - /** - * Sets the minimum core value for the specified task. If the task is managed then the cores will - * be adjusted, leaving the original min cores value in tact. If the task is not managed then the - * min cores value is altered directly. 
- * - * @param t - * @param value - */ - public void setMinCores(TaskInterface t, int coreUnits); - - /** - * Sets the minimum core value for the specified task. If the task is managed then the cores will - * be adjusted, leaving the original min cores value in tact. If the task is not managed then the - * min cores value is altered directly. - * - * @param t - * @param value - */ - public void clearTaskAdjustments(PointInterface rp); - - /** - * Enables TI integration - * - * @param rp - * @param tiTask - * @param cores - */ - public void enableTiManaged(PointInterface rp, String tiTask, int coreUnits); - - /** - * Disables Track-It management - * - * @param rp - */ - public void disableTiManaged(PointInterface rp); - - /** - * Updates TI Managed tasks and recalculates all of the min core values - * - * @param rp - */ - public void updateManagedTasks(PointInterface rp); - - /** - * Set the number of cores to normalize the proc point shots with. - * - * @param cdept - * @param cores - */ - public void setManagedCores(PointInterface cdept, int coreUnits); - - /** - * Returns a department configuration detail object from its id. - * - * @param id - * @return - */ - PointDetail getDepartmentConfigDetail(String id); - - /** - * Returns a department configuration detail object - * - * @param show - * @param dept - * @return - */ - PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept); - - /** - * Returns a list of all managed point configurations. Managed point configurations get - * priortities from an outside source, such as track it. - * - * @return a list of point configurations being managed by Track-It - */ - List getManagedPointConfs(); - - /** - * Clears all existing tasks for specified department - * - * @param cdept - */ - public void clearTasks(PointInterface cdept); - - /** - * Clears all existing tasks for specified show and department - * - * @param show - * @param dept - */ - public void clearTasks(ShowInterface show, DepartmentInterface dept); - - /** - * Updates the min proc value of all jobs that fall within the specified task. - * - * @param TaskDetail the task to sync with - */ - void syncJobsWithTask(TaskEntity t); - - /** - * Updates the min proc value of all jobs that fall within the specified task. - * - * @param TaskDetail the task to sync with - */ - void syncJobsWithTask(DepartmentInterface d, String shot); - - /** - * Updates the min proc value of all jobs that fall within the specified task. - * - * @param TaskDetail the task to sync with - */ - void syncJobsWithTask(JobInterface job); - - /** - * Returns true of the job is managed by a department manager. - * - * @param j - */ - boolean isManaged(JobInterface j); - - /** - * - * @param t - */ - void clearTaskAdjustment(TaskInterface t); + /** + * Creates a new render point configurtion. A render point configuration maps an OpenCue + * department to a Track-It task and priorizes shots based frame and final date info. + * + * @param renderPoint + */ + public void createDepartmentConfig(PointDetail renderPoint); + + /** + * Creates a new render point configurtion. A render point configuration maps an OpenCue + * department to a Track-It task and priorizes shots based frame and final date info. + * + * @param renderPoint + */ + public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept); + + /** + * Returns true if a render point configuration already exists for the specified show and + * department. 
+ * + * @param show + * @param dept + * @return + */ + public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept); + + /** + * Creates a new task. A task is for setting minimum procs by shot and department + * + * @param t + */ + public void createTask(TaskEntity t); + + /** + * Removes the specified task + * + * @param t + */ + public void removeTask(TaskInterface t); + + /** + * Returns task details + * + * @param id + * @return + */ + public TaskEntity getTaskDetail(String id); + + /** + * Sets the minimum core value for the specified task. If the task is managed then the cores + * will be adjusted, leaving the original min cores value intact. If the task is not managed + * then the min cores value is altered directly. + * + * @param t + * @param value + */ + public void setMinCores(TaskInterface t, int coreUnits); + + /** + * Sets the minimum core value for the specified task. If the task is managed then the cores + * will be adjusted, leaving the original min cores value intact. If the task is not managed + * then the min cores value is altered directly. + * + * @param t + * @param value + */ + public void clearTaskAdjustments(PointInterface rp); + + /** + * Enables TI integration + * + * @param rp + * @param tiTask + * @param cores + */ + public void enableTiManaged(PointInterface rp, String tiTask, int coreUnits); + + /** + * Disables Track-It management + * + * @param rp + */ + public void disableTiManaged(PointInterface rp); + + /** + * Updates TI Managed tasks and recalculates all of the min core values + * + * @param rp + */ + public void updateManagedTasks(PointInterface rp); + + /** + * Set the number of cores to normalize the proc point shots with. + * + * @param cdept + * @param cores + */ + public void setManagedCores(PointInterface cdept, int coreUnits); + + /** + * Returns a department configuration detail object from its id. + * + * @param id + * @return + */ + PointDetail getDepartmentConfigDetail(String id); + + /** + * Returns a department configuration detail object + * + * @param show + * @param dept + * @return + */ + PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept); + + /** + * Returns a list of all managed point configurations. Managed point configurations get + * priorities from an outside source, such as track it. + * + * @return a list of point configurations being managed by Track-It + */ + List getManagedPointConfs(); + + /** + * Clears all existing tasks for specified department + * + * @param cdept + */ + public void clearTasks(PointInterface cdept); + + /** + * Clears all existing tasks for specified show and department + * + * @param show + * @param dept + */ + public void clearTasks(ShowInterface show, DepartmentInterface dept); + + /** + * Updates the min proc value of all jobs that fall within the specified task. + * + * @param TaskDetail the task to sync with + */ + void syncJobsWithTask(TaskEntity t); + + /** + * Updates the min proc value of all jobs that fall within the specified task. + * + * @param TaskDetail the task to sync with + */ + void syncJobsWithTask(DepartmentInterface d, String shot); + + /** + * Updates the min proc value of all jobs that fall within the specified task. + * + * @param TaskDetail the task to sync with + */ + void syncJobsWithTask(JobInterface job); + + /** + * Returns true if the job is managed by a department manager.
+ * + * @param j + */ + boolean isManaged(JobInterface j); + + /** + * + * @param t + */ + void clearTaskAdjustment(TaskInterface t); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java index 5b916ac5e..3453b8126 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java @@ -43,240 +43,240 @@ @Transactional public class DepartmentManagerService implements DepartmentManager { - @Autowired - private Environment env; - - private PointDao pointDao; - private TaskDao taskDao; - private ShowDao showDao; - private JobDao jobDao; - - @Override - public void createDepartmentConfig(PointDetail renderPoint) { - pointDao.insertPointConf(renderPoint); - } - - @Override - public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept) { - return pointDao.pointConfExists(show, dept); - } - - @Override - public void createTask(TaskEntity t) { - taskDao.insertTask(t); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public TaskEntity getTaskDetail(String id) { - return taskDao.getTaskDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public PointDetail getDepartmentConfigDetail(String id) { - return pointDao.getPointConfDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept) { - return pointDao.getPointConfigDetail(show, dept); - } - - @Override - public void removeTask(TaskInterface t) { - taskDao.deleteTask(t); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void setMinCores(TaskInterface t, int coreUnits) { - if (taskDao.isManaged(t)) { - taskDao.adjustTaskMinCores(t, coreUnits); - } else { - taskDao.updateTaskMinCores(t, coreUnits); + @Autowired + private Environment env; + + private PointDao pointDao; + private TaskDao taskDao; + private ShowDao showDao; + private JobDao jobDao; + + @Override + public void createDepartmentConfig(PointDetail renderPoint) { + pointDao.insertPointConf(renderPoint); } - this.syncJobsWithTask(taskDao.getTaskDetail(t.getTaskId())); - } - - @Override - public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept) { - return pointDao.insertPointConf(show, dept); - } - - @Override - public void clearTasks(PointInterface cdept) { - taskDao.deleteTasks(cdept); - } - - @Override - public void clearTasks(ShowInterface show, DepartmentInterface dept) { - taskDao.deleteTasks(show, dept); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void clearTaskAdjustment(TaskInterface t) { - taskDao.clearTaskAdjustment(t); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void disableTiManaged(PointInterface cdept) { - pointDao.updateDisableManaged(cdept); - clearTasks(cdept); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void enableTiManaged(PointInterface p, String tiTask, int cores) { - pointDao.updateEnableManaged(p, tiTask, cores); - updateManagedTasks(p); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void setManagedCores(PointInterface p, int cores) { - pointDao.updateManagedCores(p, cores); 
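An illustrative sketch of pinning a task's minimum cores through the DepartmentManager API above. It assumes TaskEntity can be passed where a TaskInterface is expected and that core units follow the 100-units-per-core convention suggested by the syncJobsWithTask() arithmetic later in this patch; the class name and core count are hypothetical.

    import com.imageworks.spcue.TaskEntity;
    import com.imageworks.spcue.service.DepartmentManager;

    public class TaskMinCoresSketch {
        private final DepartmentManager departmentManager;

        public TaskMinCoresSketch(DepartmentManager departmentManager) {
            this.departmentManager = departmentManager;
        }

        /** Guarantee roughly eight cores for the task with the given id. */
        public void pinEightCores(String taskId) {
            TaskEntity task = departmentManager.getTaskDetail(taskId);
            // For a Track-It managed task this records an adjustment instead of
            // overwriting the stored minimum (per the Javadoc above).
            departmentManager.setMinCores(task, 800); // 800 units ~= 8 cores (assumed convention)
        }
    }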
- if (pointDao.isManaged(p, p)) { - updateManagedTasks(p); + + @Override + public boolean departmentConfigExists(ShowInterface show, DepartmentInterface dept) { + return pointDao.pointConfExists(show, dept); } - } - - @Override - public void clearTaskAdjustments(PointInterface cdept) { - taskDao.clearTaskAdjustments(cdept); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getManagedPointConfs() { - return pointDao.getManagedPointConfs(); - } - - /** - * Any task with one of these as the production status is considered in progress. - */ - private static final Set IN_PROGRESS_TASK_STATUS = new HashSet(); - static { - IN_PROGRESS_TASK_STATUS - .addAll(java.util.Arrays.asList(new String[] {"I/P", "Kicked To", "CBB", "Blocked"})); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void updateManagedTasks(PointInterface pd) {} - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void syncJobsWithTask(TaskEntity t) { - - List jobs = jobDao.getJobs(t); - if (jobs.size() == 0) { - return; + + @Override + public void createTask(TaskEntity t) { + taskDao.insertTask(t); } - if (jobs.size() == 1) { - jobDao.updateMinCores(jobs.get(0), t.minCoreUnits); - return; + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public TaskEntity getTaskDetail(String id) { + return taskDao.getTaskDetail(id); } - int core_units_per_job = t.minCoreUnits / (jobs.size() * 100); - int core_units_left_over = (t.minCoreUnits % (jobs.size() * 100) / 100); + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public PointDetail getDepartmentConfigDetail(String id) { + return pointDao.getPointConfDetail(id); + } - /* - * Calculate a base for each job - */ - Map minCores = new HashMap(jobs.size()); - int core_units_unalloc = 0; - - for (JobInterface j : jobs) { - FrameStateTotals totals = jobDao.getFrameStateTotals(j); - if (totals.waiting < core_units_per_job) { - core_units_unalloc = core_units_unalloc + (core_units_per_job - totals.waiting); - minCores.put(j, new Integer[] {totals.waiting, totals.waiting}); - } else { - minCores.put(j, new Integer[] {core_units_per_job, totals.waiting}); - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public PointDetail getDepartmentConfigDetail(ShowInterface show, DepartmentInterface dept) { + return pointDao.getPointConfigDetail(show, dept); } - /* - * Apply any left over core units. If the job doesn't have waiting frames to apply them to then - * don't do anything. 
- */ - core_units_left_over = core_units_left_over + core_units_unalloc; - while (core_units_left_over > 0) { - boolean applied = false; - for (JobInterface j : jobs) { - if (core_units_left_over < 1) { - break; + @Override + public void removeTask(TaskInterface t) { + taskDao.deleteTask(t); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void setMinCores(TaskInterface t, int coreUnits) { + if (taskDao.isManaged(t)) { + taskDao.adjustTaskMinCores(t, coreUnits); + } else { + taskDao.updateTaskMinCores(t, coreUnits); } - if (minCores.get(j)[1] - minCores.get(j)[0] > 0) { - minCores.get(j)[0] = minCores.get(j)[0] + 1; - core_units_left_over = core_units_left_over - 1; - applied = true; + this.syncJobsWithTask(taskDao.getTaskDetail(t.getTaskId())); + } + + @Override + public PointInterface createDepartmentConfig(ShowInterface show, DepartmentInterface dept) { + return pointDao.insertPointConf(show, dept); + } + + @Override + public void clearTasks(PointInterface cdept) { + taskDao.deleteTasks(cdept); + } + + @Override + public void clearTasks(ShowInterface show, DepartmentInterface dept) { + taskDao.deleteTasks(show, dept); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void clearTaskAdjustment(TaskInterface t) { + taskDao.clearTaskAdjustment(t); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void disableTiManaged(PointInterface cdept) { + pointDao.updateDisableManaged(cdept); + clearTasks(cdept); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void enableTiManaged(PointInterface p, String tiTask, int cores) { + pointDao.updateEnableManaged(p, tiTask, cores); + updateManagedTasks(p); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void setManagedCores(PointInterface p, int cores) { + pointDao.updateManagedCores(p, cores); + if (pointDao.isManaged(p, p)) { + updateManagedTasks(p); } - } - if (!applied) { - break; - } } - /* - * Update the DB + @Override + public void clearTaskAdjustments(PointInterface cdept) { + taskDao.clearTaskAdjustments(cdept); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getManagedPointConfs() { + return pointDao.getManagedPointConfs(); + } + + /** + * Any task with one of these as the production status is considered in progress. 
*/ - for (JobInterface j : jobs) { - jobDao.updateMinCores(j, minCores.get(j)[0] * 100); + private static final Set IN_PROGRESS_TASK_STATUS = new HashSet(); + static { + IN_PROGRESS_TASK_STATUS.addAll( + java.util.Arrays.asList(new String[] {"I/P", "Kicked To", "CBB", "Blocked"})); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void updateManagedTasks(PointInterface pd) {} + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void syncJobsWithTask(TaskEntity t) { + + List jobs = jobDao.getJobs(t); + if (jobs.size() == 0) { + return; + } + + if (jobs.size() == 1) { + jobDao.updateMinCores(jobs.get(0), t.minCoreUnits); + return; + } + + int core_units_per_job = t.minCoreUnits / (jobs.size() * 100); + int core_units_left_over = (t.minCoreUnits % (jobs.size() * 100) / 100); + + /* + * Calculate a base for each job + */ + Map minCores = new HashMap(jobs.size()); + int core_units_unalloc = 0; + + for (JobInterface j : jobs) { + FrameStateTotals totals = jobDao.getFrameStateTotals(j); + if (totals.waiting < core_units_per_job) { + core_units_unalloc = core_units_unalloc + (core_units_per_job - totals.waiting); + minCores.put(j, new Integer[] {totals.waiting, totals.waiting}); + } else { + minCores.put(j, new Integer[] {core_units_per_job, totals.waiting}); + } + } + + /* + * Apply any left over core units. If the job doesn't have waiting frames to apply them to + * then don't do anything. + */ + core_units_left_over = core_units_left_over + core_units_unalloc; + while (core_units_left_over > 0) { + boolean applied = false; + for (JobInterface j : jobs) { + if (core_units_left_over < 1) { + break; + } + if (minCores.get(j)[1] - minCores.get(j)[0] > 0) { + minCores.get(j)[0] = minCores.get(j)[0] + 1; + core_units_left_over = core_units_left_over - 1; + applied = true; + } + } + if (!applied) { + break; + } + } + + /* + * Update the DB + */ + for (JobInterface j : jobs) { + jobDao.updateMinCores(j, minCores.get(j)[0] * 100); + } + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void syncJobsWithTask(DepartmentInterface d, String shot) { + syncJobsWithTask(taskDao.getTaskDetail(d, shot)); + } + + @Override + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void syncJobsWithTask(JobInterface job) { + syncJobsWithTask(taskDao.getTaskDetail(job)); + } + + @Override + public boolean isManaged(JobInterface j) { + return taskDao.isManaged(j); + } + + public TaskDao getTaskDao() { + return taskDao; + } + + public void setTaskDao(TaskDao taskDao) { + this.taskDao = taskDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public PointDao getPointDao() { + return pointDao; + } + + public void setPointDao(PointDao pointDao) { + this.pointDao = pointDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; } - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void syncJobsWithTask(DepartmentInterface d, String shot) { - syncJobsWithTask(taskDao.getTaskDetail(d, shot)); - } - - @Override - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void syncJobsWithTask(JobInterface job) { - syncJobsWithTask(taskDao.getTaskDetail(job)); - } - - @Override - public boolean isManaged(JobInterface j) { - return taskDao.isManaged(j); - } - - public TaskDao getTaskDao() { - return taskDao; - } - - public 
void setTaskDao(TaskDao taskDao) { - this.taskDao = taskDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public PointDao getPointDao() { - return pointDao; - } - - public void setPointDao(PointDao pointDao) { - this.pointDao = pointDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java index 54debfbc3..6448a66f5 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DependManager.java @@ -38,164 +38,165 @@ public interface DependManager { - /** - * This just calls createDepend(Dependency depend) with the result of - * buildDepend(BuildableDependency depend). This is mainly for convenience. - * - * @param BuildableDependency depend - */ - void createDepend(BuildableDependency depend); - - List getWhatDependsOn(JobInterface job); - - List getWhatDependsOn(JobInterface job, DependTarget target); - - /** - * Return any dependencies that reference the given frame as the frame to depend on. - * - * @param frame - * @param active To limit results to only active depends, set this to true. To limit results to - * only inactive depends, set this to false. - * @return - */ - List getWhatDependsOn(FrameInterface frame, boolean active); - - List getWhatDependsOn(FrameInterface frame); - - List getWhatDependsOn(LayerInterface layer); - - /** - * Return any dependencies that reference the given layer as the layer to depend on. - * - * @param layer - * @param active To limit results to only active depends, set this to true. To limit results to - * only inactive depends, set this to false. - * @return - */ - List getWhatDependsOn(LayerInterface layer, boolean active); - - LightweightDependency getDepend(String id); - - void satisfyDepend(LightweightDependency depend); - - /** - * Returns a list of depends where the specified job is the depender. Passing a depend target will - * limit the results to either internal or external. This method returns active depends only. - * - * @param Job - * @param DependTarget - * @return List - */ - public List getWhatThisDependsOn(JobInterface job, DependTarget target); - - /** - * Returns a list of depends the layer depends on. Passing in a depend target will limit the - * results to either internal, external or both. This method returns active depends only. - * - * @param Layer - * @return List - */ - public List getWhatThisDependsOn(LayerInterface layer, - DependTarget target); - - /** - * Returns a list of depends the frame depends on. Passing in a depend target will limit the - * results to either internal, external, or both.This method returns active depends only. - * - * @param Frame - * @return List - */ - public List getWhatThisDependsOn(FrameInterface frame, - DependTarget target); - - /** - * Create a JobOnJob depend. - * - * @param depend - */ - void createDepend(JobOnJob depend); - - /** - * Create a JobOnLayer depend - * - * @param depend - */ - void createDepend(JobOnLayer depend); - - /** - * Create a JobOnFrame depend - * - * @param depend - */ - void createDepend(JobOnFrame depend); - - /** - * Create a LayerOnJob depend. - * - * @param depend - */ - void createDepend(LayerOnJob depend); - - /** - * Create a LayerOnLayer depend. 
- * - * @param depend - */ - void createDepend(LayerOnLayer depend); - - /** - * Create a LayerOnFrame depend. - * - * @param depend - */ - void createDepend(LayerOnFrame depend); - - /** - * Create a FrameOnJob depend. - * - * @param depend - */ - void createDepend(FrameOnJob depend); - - /** - * Create a FrameOnLayer depend. - * - * @param depend - */ - void createDepend(FrameOnLayer depend); - - /** - * Create a FrameOnFrame depend. - * - * @param depend - */ - void createDepend(FrameOnFrame depend); - - /** - * Create a FrameByFrame depend. - * - * @param depend - */ - void createDepend(FrameByFrame depend); - - /** - * Creates a previous frame dependency. - * - * @param depend - */ - void createDepend(PreviousFrame depend); - - /** - * Unsatisfy the specified dependency. Currently only works for FrameOnFrame depends. - * - * @param depend - */ - void unsatisfyDepend(LightweightDependency depend); - - /** - * Create a depend of type LayerOnSimFrame - * - * @param depend - */ - void createDepend(LayerOnSimFrame depend); + /** + * This just calls createDepend(Dependency depend) with the result of + * buildDepend(BuildableDependency depend). This is mainly for convenience. + * + * @param BuildableDependency depend + */ + void createDepend(BuildableDependency depend); + + List getWhatDependsOn(JobInterface job); + + List getWhatDependsOn(JobInterface job, DependTarget target); + + /** + * Return any dependencies that reference the given frame as the frame to depend on. + * + * @param frame + * @param active To limit results to only active depends, set this to true. To limit results to + * only inactive depends, set this to false. + * @return + */ + List getWhatDependsOn(FrameInterface frame, boolean active); + + List getWhatDependsOn(FrameInterface frame); + + List getWhatDependsOn(LayerInterface layer); + + /** + * Return any dependencies that reference the given layer as the layer to depend on. + * + * @param layer + * @param active To limit results to only active depends, set this to true. To limit results to + * only inactive depends, set this to false. + * @return + */ + List getWhatDependsOn(LayerInterface layer, boolean active); + + LightweightDependency getDepend(String id); + + void satisfyDepend(LightweightDependency depend); + + /** + * Returns a list of depends where the specified job is the depender. Passing a depend target + * will limit the results to either internal or external. This method returns active depends + * only. + * + * @param Job + * @param DependTarget + * @return List + */ + public List getWhatThisDependsOn(JobInterface job, DependTarget target); + + /** + * Returns a list of depends the layer depends on. Passing in a depend target will limit the + * results to either internal, external or both. This method returns active depends only. + * + * @param Layer + * @return List + */ + public List getWhatThisDependsOn(LayerInterface layer, + DependTarget target); + + /** + * Returns a list of depends the frame depends on. Passing in a depend target will limit the + * results to either internal, external, or both.This method returns active depends only. + * + * @param Frame + * @return List + */ + public List getWhatThisDependsOn(FrameInterface frame, + DependTarget target); + + /** + * Create a JobOnJob depend. 
+ * + * @param depend + */ + void createDepend(JobOnJob depend); + + /** + * Create a JobOnLayer depend + * + * @param depend + */ + void createDepend(JobOnLayer depend); + + /** + * Create a JobOnFrame depend + * + * @param depend + */ + void createDepend(JobOnFrame depend); + + /** + * Create a LayerOnJob depend. + * + * @param depend + */ + void createDepend(LayerOnJob depend); + + /** + * Create a LayerOnLayer depend. + * + * @param depend + */ + void createDepend(LayerOnLayer depend); + + /** + * Create a LayerOnFrame depend. + * + * @param depend + */ + void createDepend(LayerOnFrame depend); + + /** + * Create a FrameOnJob depend. + * + * @param depend + */ + void createDepend(FrameOnJob depend); + + /** + * Create a FrameOnLayer depend. + * + * @param depend + */ + void createDepend(FrameOnLayer depend); + + /** + * Create a FrameOnFrame depend. + * + * @param depend + */ + void createDepend(FrameOnFrame depend); + + /** + * Create a FrameByFrame depend. + * + * @param depend + */ + void createDepend(FrameByFrame depend); + + /** + * Creates a previous frame dependency. + * + * @param depend + */ + void createDepend(PreviousFrame depend); + + /** + * Unsatisfy the specified dependency. Currently only works for FrameOnFrame depends. + * + * @param depend + */ + void unsatisfyDepend(LightweightDependency depend); + + /** + * Create a depend of type LayerOnSimFrame + * + * @param depend + */ + void createDepend(LayerOnSimFrame depend); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java index 128571ce0..1b62e54c0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java @@ -60,548 +60,557 @@ @Transactional public class DependManagerService implements DependManager { - private static final Logger logger = LogManager.getLogger(DependManagerService.class); - - private DependDao dependDao; - private JobDao jobDao; - private LayerDao layerDao; - private FrameDao frameDao; - private FrameSearchFactory frameSearchFactory; - - /** Job Depends **/ - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(JobOnJob depend) { - if (jobDao.isJobComplete(depend.getDependOnJob())) { - throw new DependException("The job you are depending on is already complete."); - } - dependDao.insertDepend(depend); - updateDependCount(depend.getDependErJob()); - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(JobOnLayer depend) { - if (layerDao.isLayerComplete(depend.getDependOnLayer())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErJob()); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(JobOnFrame depend) { - if (frameDao.isFrameComplete(depend.getDependOnFrame())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErJob()); - } - } - - /** Layer Depends **/ - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(LayerOnJob depend) { - if (jobDao.isJobComplete(depend.getDependOnJob())) { - throw new DependException("The job you are depending on is already complete."); - } - dependDao.insertDepend(depend); - updateDependCount(depend.getDependErLayer()); - } 
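Most createDepend variants in this service follow the same shape: a depend on a job that is already complete is rejected outright, a depend on an already-complete layer or frame is inserted in a deactivated state, and the depender's depend count is bumped only while the depend is active. A standalone sketch of that pattern (Depend, the targetComplete flag and the counter below are illustrative stand-ins for the real DAO calls, not spcue types):

public class CreateDependSketch {
    static class Depend {
        private boolean active = true;
        void setActive(boolean a) { active = a; }
        boolean isActive() { return active; }
    }

    private boolean targetComplete; // stands in for isJobComplete/isLayerComplete/isFrameComplete
    private int dependCount;        // stands in for the depender's depend count

    void createDepend(Depend depend) {
        if (targetComplete) {
            // The entity we would wait on is already done, so the depend
            // starts out inactive and never blocks the depender.
            depend.setActive(false);
        }
        // dependDao.insertDepend(depend) would happen here.
        if (depend.isActive()) {
            // Only active depends bump the count that keeps the depender's
            // frames in the DEPEND state.
            dependCount++;
        }
    }

    public static void main(String[] args) {
        CreateDependSketch s = new CreateDependSketch();
        s.targetComplete = true;
        s.createDepend(new Depend());
        System.out.println("dependCount=" + s.dependCount); // prints 0
    }
}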
- - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(LayerOnLayer depend) { - if (layerDao.isLayerComplete(depend.getDependOnLayer())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErLayer()); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(LayerOnFrame depend) { - if (frameDao.isFrameComplete(depend.getDependOnFrame())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCount(depend.getDependErLayer()); - } - } - - /** Frame Depends **/ - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(FrameOnJob depend) { - if (jobDao.isJobComplete(depend.getDependOnJob())) { - throw new DependException("The job you are depending on is already complete."); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCounts(depend.getDependErFrame()); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(FrameOnLayer depend) { - if (layerDao.isLayerComplete(depend.getDependOnLayer())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCounts(depend.getDependErFrame()); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(FrameOnFrame depend) { - if (frameDao.isFrameComplete(depend.getDependOnFrame())) { - depend.setActive(false); - } - dependDao.insertDepend(depend); - if (depend.isActive()) { - updateDependCounts(depend.getDependErFrame()); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(LayerOnSimFrame depend) { - - /* - * Need the frame range to make all the dependencies. - */ - LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); - - /* - * A normalized list of frames. - */ - List dependErFrameSet = - CueUtil.normalizeFrameRange(dependErLayer.range, dependErLayer.chunkSize); - - int dependErFrameSetSize = dependErFrameSet.size(); - for (int idx = 0; idx < dependErFrameSetSize; idx = idx + 1) { - /* - * Lookup the frame we need out of our depend-er layer. - */ - int frameNum = dependErFrameSet.get(idx); - - FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, frameNum); - FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, depend.getDependOnFrame()); - createDepend(fofDepend); - } - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(FrameByFrame depend) { - - /* - * Obtain the full layer record so we have access to the frame range and other properties. - */ - LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); - - LayerDetail dependOnLayer = layerDao.getLayerDetail(depend.getDependOnLayer().getLayerId()); - - /* - * Do not create external dependencies on tile layers. - */ - if (depend.getTarget().equals(DependTarget.EXTERNAL) - && dependOnLayer.getName().contains("_tile_")) { - return; - } - - /* - * Please note. The job frame ranges are not normalized in any way, there is going to be - * duplicates. (why a "Set" would allow dups is unknown). Anyways, When iterating over these - * frame sets, you must do so by chunk size and ignore duplicate frames. 
- */ - - List dependErFrameSet = - CueUtil.normalizeFrameRange(dependErLayer.range, dependErLayer.chunkSize); - - List dependOnFrameSet = - CueUtil.normalizeFrameRange(dependOnLayer.range, dependOnLayer.chunkSize); - - /* - * When a layer is chunked so large it contains only a single frame, any FrameByFrame depends - * to/from that that layer are converted to LayerOnLayer depends. - */ - if ((dependOnFrameSet.size() == 1 && dependOnLayer.chunkSize > 1) - || (dependErFrameSet.size() == 1 && dependErLayer.chunkSize > 1)) { - - LayerOnLayer lolDepend = - new LayerOnLayer(depend.getDependErLayer(), depend.getDependOnLayer()); - - createDepend(lolDepend); - depend.setId(lolDepend.getId()); - return; - } - - /* - * Create the parent depends. - */ - try { - dependDao.insertDepend(depend); - } catch (DataIntegrityViolationException e) { - LightweightDependency originalDep = dependDao.getDependBySignature(depend.getSignature()); - depend.setId(originalDep.getId()); - if (!depend.isActive()) { - unsatisfyDepend(originalDep); - } else { - return; - } - } - - int dependErFrameSetSize = dependErFrameSet.size(); - for (int idx = 0; idx < dependErFrameSetSize; idx = idx + 1) { - - Set dependOnFrames = new HashSet(dependOnFrameSet.size()); - - int dependErFrameNum = dependErFrameSet.get(idx); - /* The frame always depends on the corresponding frame. */ - int dependOnFrameNum = dependErFrameNum; - - /* - * Finds any additional frames the dependErFrame might need to depend on. - */ - if (dependOnLayer.chunkSize > dependErLayer.chunkSize) { - dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); - dependOnFrames.add(dependOnFrameNum); - } else if (dependOnLayer.chunkSize < dependErLayer.chunkSize) { - dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); - dependOnFrames.add(dependOnFrameNum); - - for (int i = 0; i <= dependErLayer.chunkSize - dependOnLayer.chunkSize; i++) { - int nextFrameIdx = dependOnFrameSet.indexOf(dependOnFrameNum) + i; - try { - dependOnFrames.add(dependOnFrameSet.get(nextFrameIdx)); - } catch (java.lang.IndexOutOfBoundsException e) { - continue; - } + private static final Logger logger = LogManager.getLogger(DependManagerService.class); + + private DependDao dependDao; + private JobDao jobDao; + private LayerDao layerDao; + private FrameDao frameDao; + private FrameSearchFactory frameSearchFactory; + + /** Job Depends **/ + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(JobOnJob depend) { + if (jobDao.isJobComplete(depend.getDependOnJob())) { + throw new DependException("The job you are depending on is already complete."); } - } else if (!dependErFrameSet.equals(dependOnFrameSet)) { - if (dependOnFrameSet.contains(dependErFrameNum)) { - dependOnFrames.add(dependErFrameNum); - } else { - continue; + dependDao.insertDepend(depend); + updateDependCount(depend.getDependErJob()); + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(JobOnLayer depend) { + if (layerDao.isLayerComplete(depend.getDependOnLayer())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErJob()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(JobOnFrame depend) { + if (frameDao.isFrameComplete(depend.getDependOnFrame())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErJob()); 
+ } + } + + /** Layer Depends **/ + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnJob depend) { + if (jobDao.isJobComplete(depend.getDependOnJob())) { + throw new DependException("The job you are depending on is already complete."); + } + dependDao.insertDepend(depend); + updateDependCount(depend.getDependErLayer()); + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnLayer depend) { + if (layerDao.isLayerComplete(depend.getDependOnLayer())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErLayer()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnFrame depend) { + if (frameDao.isFrameComplete(depend.getDependOnFrame())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCount(depend.getDependErLayer()); + } + } + + /** Frame Depends **/ + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameOnJob depend) { + if (jobDao.isJobComplete(depend.getDependOnJob())) { + throw new DependException("The job you are depending on is already complete."); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCounts(depend.getDependErFrame()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameOnLayer depend) { + if (layerDao.isLayerComplete(depend.getDependOnLayer())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCounts(depend.getDependErFrame()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameOnFrame depend) { + if (frameDao.isFrameComplete(depend.getDependOnFrame())) { + depend.setActive(false); + } + dependDao.insertDepend(depend); + if (depend.isActive()) { + updateDependCounts(depend.getDependErFrame()); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(LayerOnSimFrame depend) { + + /* + * Need the frame range to make all the dependencies. + */ + LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); + + /* + * A normalized list of frames. + */ + List dependErFrameSet = + CueUtil.normalizeFrameRange(dependErLayer.range, dependErLayer.chunkSize); + + int dependErFrameSetSize = dependErFrameSet.size(); + for (int idx = 0; idx < dependErFrameSetSize; idx = idx + 1) { + /* + * Lookup the frame we need out of our depend-er layer. + */ + int frameNum = dependErFrameSet.get(idx); + + FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, frameNum); + FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, depend.getDependOnFrame()); + createDepend(fofDepend); + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(FrameByFrame depend) { + + /* + * Obtain the full layer record so we have access to the frame range and other properties. + */ + LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); + + LayerDetail dependOnLayer = layerDao.getLayerDetail(depend.getDependOnLayer().getLayerId()); + + /* + * Do not create external dependencies on tile layers. 
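createDepend(LayerOnSimFrame) above fans a single layer-level request out into one FrameOnFrame depend per frame of the depender layer. A minimal standalone illustration, where normalizeFrameRange is a simplified stand-in for CueUtil.normalizeFrameRange (one entry per chunk, identified by its first frame) and the println replaces the frameDao.findFrame / createDepend(new FrameOnFrame(...)) calls of the real code:

import java.util.ArrayList;
import java.util.List;

public class LayerOnSimFrameSketch {
    // Illustrative stand-in for CueUtil.normalizeFrameRange.
    static List<Integer> normalizeFrameRange(int start, int end, int chunkSize) {
        List<Integer> frames = new ArrayList<>();
        for (int f = start; f <= end; f += chunkSize) {
            frames.add(f);
        }
        return frames;
    }

    public static void main(String[] args) {
        int simFrame = 1; // the single depend-on frame of the sim layer
        // Depender layer 1-20, chunkSize 5 -> chunks start at 1, 6, 11, 16.
        for (int dependErFrame : normalizeFrameRange(1, 20, 5)) {
            System.out.println("FrameOnFrame: frame " + dependErFrame
                    + " waits on sim frame " + simFrame);
        }
    }
}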
+ */ + if (depend.getTarget().equals(DependTarget.EXTERNAL) + && dependOnLayer.getName().contains("_tile_")) { + return; + } + + /* + * Please note. The job frame ranges are not normalized in any way, there is going to be + * duplicates. (why a "Set" would allow dups is unknown). Anyways, When iterating over these + * frame sets, you must do so by chunk size and ignore duplicate frames. + */ + + List dependErFrameSet = + CueUtil.normalizeFrameRange(dependErLayer.range, dependErLayer.chunkSize); + + List dependOnFrameSet = + CueUtil.normalizeFrameRange(dependOnLayer.range, dependOnLayer.chunkSize); + + /* + * When a layer is chunked so large it contains only a single frame, any FrameByFrame + * depends to/from that that layer are converted to LayerOnLayer depends. + */ + if ((dependOnFrameSet.size() == 1 && dependOnLayer.chunkSize > 1) + || (dependErFrameSet.size() == 1 && dependErLayer.chunkSize > 1)) { + + LayerOnLayer lolDepend = + new LayerOnLayer(depend.getDependErLayer(), depend.getDependOnLayer()); + + createDepend(lolDepend); + depend.setId(lolDepend.getId()); + return; + } + + /* + * Create the parent depends. + */ + try { + dependDao.insertDepend(depend); + } catch (DataIntegrityViolationException e) { + LightweightDependency originalDep = + dependDao.getDependBySignature(depend.getSignature()); + depend.setId(originalDep.getId()); + if (!depend.isActive()) { + unsatisfyDepend(originalDep); + } else { + return; + } + } + + int dependErFrameSetSize = dependErFrameSet.size(); + for (int idx = 0; idx < dependErFrameSetSize; idx = idx + 1) { + + Set dependOnFrames = new HashSet(dependOnFrameSet.size()); + + int dependErFrameNum = dependErFrameSet.get(idx); + /* The frame always depends on the corresponding frame. */ + int dependOnFrameNum = dependErFrameNum; + + /* + * Finds any additional frames the dependErFrame might need to depend on. + */ + if (dependOnLayer.chunkSize > dependErLayer.chunkSize) { + dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); + dependOnFrames.add(dependOnFrameNum); + } else if (dependOnLayer.chunkSize < dependErLayer.chunkSize) { + dependOnFrameNum = CueUtil.findChunk(dependOnFrameSet, dependErFrameNum); + dependOnFrames.add(dependOnFrameNum); + + for (int i = 0; i <= dependErLayer.chunkSize - dependOnLayer.chunkSize; i++) { + int nextFrameIdx = dependOnFrameSet.indexOf(dependOnFrameNum) + i; + try { + dependOnFrames.add(dependOnFrameSet.get(nextFrameIdx)); + } catch (java.lang.IndexOutOfBoundsException e) { + continue; + } + } + } else if (!dependErFrameSet.equals(dependOnFrameSet)) { + if (dependOnFrameSet.contains(dependErFrameNum)) { + dependOnFrames.add(dependErFrameNum); + } else { + continue; + } + } else { + dependOnFrames.add(dependErFrameNum); + } + + /* + * Now we can finally start adding child dependencies. + */ + try { + FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, dependErFrameNum); + for (int frameNum : dependOnFrames) { + FrameInterface dependOnFrame = frameDao.findFrame(dependOnLayer, frameNum); + FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, dependOnFrame, depend); + + createDepend(fofDepend); + + } + } catch (DataRetrievalFailureException dre) { + logger.warn("failed to create frame by frame depend, " + + "part of frame on frame depend: " + depend.getId() + " reason: " + dre); + } + } + } + + @Override + public void createDepend(PreviousFrame depend) { + + /* + * Obtain the full layer record so we have access to the frame range and other properties. 
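The chunk-size handling is the subtle part of createDepend(FrameByFrame): layers with identical frame sets map frame to frame, a larger depend-on chunk means several depender frames wait on the same chunk, and a layer chunked down to a single frame collapses the whole thing into a LayerOnLayer depend. A standalone sketch of the mismatch case, where findChunkStart is a simplified stand-in for CueUtil.findChunk (not the real implementation) and the frame ranges are made up:

import java.util.ArrayList;
import java.util.List;

public class FrameByFrameSketch {
    // Simplified stand-in for CueUtil.findChunk: the largest chunk-start frame
    // that is <= the depender frame number.
    static int findChunkStart(List<Integer> chunkStarts, int frame) {
        int result = chunkStarts.get(0);
        for (int start : chunkStarts) {
            if (start <= frame) {
                result = start;
            }
        }
        return result;
    }

    public static void main(String[] args) {
        // Depender layer: frames 1-10, chunkSize 1 -> one entry per frame.
        List<Integer> dependErFrames = new ArrayList<>();
        for (int f = 1; f <= 10; f++) {
            dependErFrames.add(f);
        }
        // Depend-on layer: frames 1-10, chunkSize 5 -> chunk starts 1 and 6.
        List<Integer> dependOnFrames = List.of(1, 6);

        // Each depender frame depends on the chunk that contains it, which is
        // what the dependOnLayer.chunkSize > dependErLayer.chunkSize branch
        // above computes with CueUtil.findChunk.
        for (int frame : dependErFrames) {
            System.out.println("frame " + frame + " depends on chunk starting at "
                    + findChunkStart(dependOnFrames, frame));
        }
    }
}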
+ */ + LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); + + LayerDetail dependOnLayer = layerDao.getLayerDetail(depend.getDependOnLayer().getLayerId()); + + FrameSet dependErFrameSet = new FrameSet(dependErLayer.range); + FrameSet dependOnFrameSet = new FrameSet(dependOnLayer.range); + + dependDao.insertDepend(depend); + int dependErFrameSetSize = dependErFrameSet.size(); + for (int idx = 1; idx < dependErFrameSetSize; idx = idx + 1) { + + try { + FrameInterface dependErFrame = + frameDao.findFrame(dependErLayer, dependErFrameSet.get(idx)); + + FrameInterface dependOnFrame = + frameDao.findFrame(dependOnLayer, dependOnFrameSet.get(idx - 1)); + + createDepend(new FrameOnFrame(dependErFrame, dependOnFrame, depend)); + } catch (DataRetrievalFailureException dre) { + logger.warn("failed to create frame by frame depend, " + + "part of a previous frame depend: " + depend.getId() + " reason: " + dre); + } + } + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void createDepend(BuildableDependency depend) { + + JobInterface onJob = null; + JobInterface erJob = null; + + try { + onJob = jobDao.findJob(depend.getDependOnJobName()); + erJob = jobDao.findJob(depend.getDependErJobName()); + } catch (Exception e) { + throw new DependencyManagerException("failed to setup new dependency: " + + depend.getType().toString() + ", was unable to find job info for " + + depend.getDependOnJobName() + " or " + depend.getDependErJobName() + "," + e); } - } else { - dependOnFrames.add(dependErFrameNum); - } - /* - * Now we can finally start adding child dependencies. - */ - try { - FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, dependErFrameNum); - for (int frameNum : dependOnFrames) { - FrameInterface dependOnFrame = frameDao.findFrame(dependOnLayer, frameNum); - FrameOnFrame fofDepend = new FrameOnFrame(dependErFrame, dependOnFrame, depend); + switch (depend.getType()) { + + case FRAME_BY_FRAME: + createDepend( + new FrameByFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case JOB_ON_JOB: + createDepend(new JobOnJob(erJob, onJob)); + break; + + case JOB_ON_LAYER: + createDepend(new JobOnLayer(erJob, + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case JOB_ON_FRAME: + createDepend(new JobOnFrame(erJob, + frameDao.findFrame(onJob, depend.getDependOnFrameName()))); + break; + + case LAYER_ON_JOB: + createDepend(new LayerOnJob( + layerDao.findLayer(erJob, depend.getDependErLayerName()), onJob)); + break; + + case LAYER_ON_LAYER: + LayerOnLayer lol = + new LayerOnLayer(layerDao.findLayer(erJob, depend.getDependErLayerName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName())); + lol.setAnyFrame(depend.anyFrame); + createDepend(lol); + break; + + case LAYER_ON_FRAME: + createDepend( + new LayerOnFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + frameDao.findFrame(onJob, depend.getDependOnLayerName()))); + break; + + case FRAME_ON_JOB: + createDepend(new FrameOnJob( + frameDao.findFrame(erJob, depend.getDependErFrameName()), onJob)); + break; + + case FRAME_ON_LAYER: + createDepend( + new FrameOnLayer(frameDao.findFrame(erJob, depend.getDependErFrameName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case FRAME_ON_FRAME: + createDepend( + new FrameOnFrame(frameDao.findFrame(erJob, depend.getDependErFrameName()), + frameDao.findFrame(onJob, 
depend.getDependOnFrameName()))); + break; + + case PREVIOUS_FRAME: + createDepend( + new PreviousFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), + layerDao.findLayer(onJob, depend.getDependOnLayerName()))); + break; + + case LAYER_ON_SIM_FRAME: + createDepend(new LayerOnSimFrame( + layerDao.findLayer(erJob, depend.getDependErLayerName()), + frameDao.findFrame(onJob, depend.getDependOnFrameName()))); + break; + } + } - createDepend(fofDepend); + private void updateDependCount(LayerInterface layer) { + FrameSearchInterface r = frameSearchFactory.create(layer); + for (FrameInterface f : frameDao.findFrames(r)) { + updateDependCounts(f); + } + } + private void updateDependCount(JobInterface job) { + FrameSearchInterface r = frameSearchFactory.create(job); + for (FrameInterface f : frameDao.findFrames(r)) { + updateDependCounts(f); } - } catch (DataRetrievalFailureException dre) { - logger.warn("failed to create frame by frame depend, " + "part of frame on frame depend: " - + depend.getId() + " reason: " + dre); - } - } - } - - @Override - public void createDepend(PreviousFrame depend) { - - /* - * Obtain the full layer record so we have access to the frame range and other properties. - */ - LayerDetail dependErLayer = layerDao.getLayerDetail(depend.getDependErLayer().getLayerId()); - - LayerDetail dependOnLayer = layerDao.getLayerDetail(depend.getDependOnLayer().getLayerId()); - - FrameSet dependErFrameSet = new FrameSet(dependErLayer.range); - FrameSet dependOnFrameSet = new FrameSet(dependOnLayer.range); - - dependDao.insertDepend(depend); - int dependErFrameSetSize = dependErFrameSet.size(); - for (int idx = 1; idx < dependErFrameSetSize; idx = idx + 1) { - - try { - FrameInterface dependErFrame = frameDao.findFrame(dependErLayer, dependErFrameSet.get(idx)); + } - FrameInterface dependOnFrame = - frameDao.findFrame(dependOnLayer, dependOnFrameSet.get(idx - 1)); - - createDepend(new FrameOnFrame(dependErFrame, dependOnFrame, depend)); - } catch (DataRetrievalFailureException dre) { - logger.warn("failed to create frame by frame depend, " + "part of a previous frame depend: " - + depend.getId() + " reason: " + dre); - } - } - } + private void updateDependCounts(FrameInterface f) { + dependDao.incrementDependCount(f); + } - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void createDepend(BuildableDependency depend) { - - JobInterface onJob = null; - JobInterface erJob = null; - - try { - onJob = jobDao.findJob(depend.getDependOnJobName()); - erJob = jobDao.findJob(depend.getDependErJobName()); - } catch (Exception e) { - throw new DependencyManagerException("failed to setup new dependency: " - + depend.getType().toString() + ", was unable to find job info for " - + depend.getDependOnJobName() + " or " + depend.getDependErJobName() + "," + e); + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LightweightDependency getDepend(String id) { + return dependDao.getDepend(id); } - switch (depend.getType()) { - - case FRAME_BY_FRAME: - createDepend(new FrameByFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), - layerDao.findLayer(onJob, depend.getDependOnLayerName()))); - break; - - case JOB_ON_JOB: - createDepend(new JobOnJob(erJob, onJob)); - break; + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void unsatisfyDepend(LightweightDependency depend) { + + // Currently only handles FrameOnFrame and LayerOnLayer. 
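unsatisfyDepend and the satisfyDepend method that follows are mirror images: satisfying a depend sets it inactive and decrements the depend count of every dependent frame (recursing through the children of a FRAME_BY_FRAME parent), while unsatisfying re-activates it and increments the counts again. A toy counter makes the invariant visible; the Frame class and states below are illustrative, not the real schema:

public class DependCountSketch {
    static class Frame {
        int dependCount; // stands in for the frame's depend count
        String state() { return dependCount > 0 ? "DEPEND" : "WAITING"; }
    }

    public static void main(String[] args) {
        Frame frame = new Frame();

        // createDepend / unsatisfyDepend path: an active depend bumps the count.
        frame.dependCount++;
        System.out.println("after unsatisfy: " + frame.state()); // DEPEND

        // satisfyDepend path: only decrement when the count is not already zero,
        // which is the case dependDao.decrementDependCount guards and logs.
        if (frame.dependCount > 0) {
            frame.dependCount--;
        }
        System.out.println("after satisfy: " + frame.state());   // WAITING
    }
}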
+ if (dependDao.setActive(depend)) { + + switch (depend.type) { + + case FRAME_ON_FRAME: + FrameInterface frame = frameDao.getFrame(depend.dependErFrameId); + updateDependCounts(frame); + break; + + case LAYER_ON_LAYER: + updateDependCount(layerDao.getLayer(depend.dependErLayerId)); + break; + } + } + } - case JOB_ON_LAYER: - createDepend( - new JobOnLayer(erJob, layerDao.findLayer(onJob, depend.getDependOnLayerName()))); - break; - - case JOB_ON_FRAME: - createDepend( - new JobOnFrame(erJob, frameDao.findFrame(onJob, depend.getDependOnFrameName()))); - break; - - case LAYER_ON_JOB: - createDepend( - new LayerOnJob(layerDao.findLayer(erJob, depend.getDependErLayerName()), onJob)); - break; - - case LAYER_ON_LAYER: - LayerOnLayer lol = - new LayerOnLayer(layerDao.findLayer(erJob, depend.getDependErLayerName()), - layerDao.findLayer(onJob, depend.getDependOnLayerName())); - lol.setAnyFrame(depend.anyFrame); - createDepend(lol); - break; - - case LAYER_ON_FRAME: - createDepend(new LayerOnFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), - frameDao.findFrame(onJob, depend.getDependOnLayerName()))); - break; - - case FRAME_ON_JOB: - createDepend( - new FrameOnJob(frameDao.findFrame(erJob, depend.getDependErFrameName()), onJob)); - break; - - case FRAME_ON_LAYER: - createDepend(new FrameOnLayer(frameDao.findFrame(erJob, depend.getDependErFrameName()), - layerDao.findLayer(onJob, depend.getDependOnLayerName()))); - break; - - case FRAME_ON_FRAME: - createDepend(new FrameOnFrame(frameDao.findFrame(erJob, depend.getDependErFrameName()), - frameDao.findFrame(onJob, depend.getDependOnFrameName()))); - break; - - case PREVIOUS_FRAME: - createDepend(new PreviousFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), - layerDao.findLayer(onJob, depend.getDependOnLayerName()))); - break; - - case LAYER_ON_SIM_FRAME: - createDepend(new LayerOnSimFrame(layerDao.findLayer(erJob, depend.getDependErLayerName()), - frameDao.findFrame(onJob, depend.getDependOnFrameName()))); - break; - } - } - - private void updateDependCount(LayerInterface layer) { - FrameSearchInterface r = frameSearchFactory.create(layer); - for (FrameInterface f : frameDao.findFrames(r)) { - updateDependCounts(f); - } - } - - private void updateDependCount(JobInterface job) { - FrameSearchInterface r = frameSearchFactory.create(job); - for (FrameInterface f : frameDao.findFrames(r)) { - updateDependCounts(f); - } - } - - private void updateDependCounts(FrameInterface f) { - dependDao.incrementDependCount(f); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public LightweightDependency getDepend(String id) { - return dependDao.getDepend(id); - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void unsatisfyDepend(LightweightDependency depend) { - - // Currently only handles FrameOnFrame and LayerOnLayer. - if (dependDao.setActive(depend)) { - - switch (depend.type) { - - case FRAME_ON_FRAME: - FrameInterface frame = frameDao.getFrame(depend.dependErFrameId); - updateDependCounts(frame); - break; - - case LAYER_ON_LAYER: - updateDependCount(layerDao.getLayer(depend.dependErLayerId)); - break; - } - } - } - - @Transactional(propagation = Propagation.SUPPORTS) - public void satisfyDepend(LightweightDependency depend) { - /* - * Before setting the depend to in-active, obtain a list of frames and decrement the depend - * count on them. 
- */ - if (DependType.FRAME_BY_FRAME.equals(depend.type)) { - List children = dependDao.getChildDepends(depend); - - for (LightweightDependency lwd : children) { - satisfyDepend(lwd); - } - return; - } - - /* - * Only decrement the depend counts if the depend is actually set to inactive. - */ - if (dependDao.setInactive(depend)) { - logger.info("satisfied depend: " + depend.getId()); - for (FrameInterface f : frameDao.getDependentFrames(depend)) { - if (!dependDao.decrementDependCount(f)) { - logger.warn("warning, depend count for " + depend.getId() + "was not decremented " - + "for frame " + f + "because the count is " + "already 0."); + @Transactional(propagation = Propagation.SUPPORTS) + public void satisfyDepend(LightweightDependency depend) { + /* + * Before setting the depend to in-active, obtain a list of frames and decrement the depend + * count on them. + */ + if (DependType.FRAME_BY_FRAME.equals(depend.type)) { + List children = dependDao.getChildDepends(depend); + + for (LightweightDependency lwd : children) { + satisfyDepend(lwd); + } + return; } - } - } - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatThisDependsOn(JobInterface job, DependTarget target) { - return dependDao.getWhatThisDependsOn(job, target); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatThisDependsOn(LayerInterface layer, - DependTarget target) { - return dependDao.getWhatThisDependsOn(layer, target); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatThisDependsOn(FrameInterface frame, - DependTarget target) { - return dependDao.getWhatThisDependsOn(frame, target); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public LightweightDependency getCurrentDepend(String id) { - return dependDao.getDepend(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatDependsOn(JobInterface job) { - return dependDao.getWhatDependsOn(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatDependsOn(JobInterface job, DependTarget target) { - return dependDao.getWhatDependsOn(job, target); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatDependsOn(FrameInterface frame) { - return dependDao.getWhatDependsOn(frame); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatDependsOn(FrameInterface frame, boolean active) { - return dependDao.getWhatDependsOn(frame, active); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatDependsOn(LayerInterface layer) { - return dependDao.getWhatDependsOn(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getWhatDependsOn(LayerInterface layer, boolean active) { - return dependDao.getWhatDependsOn(layer, active); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void deleteDepend(LightweightDependency depend) { - dependDao.deleteDepend(depend); - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { 
- this.layerDao = layerDao; - } - - public DependDao getDependDao() { - return dependDao; - } - - public void setDependDao(DependDao workDao) { - this.dependDao = workDao; - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } + + /* + * Only decrement the depend counts if the depend is actually set to inactive. + */ + if (dependDao.setInactive(depend)) { + logger.info("satisfied depend: " + depend.getId()); + for (FrameInterface f : frameDao.getDependentFrames(depend)) { + if (!dependDao.decrementDependCount(f)) { + logger.warn( + "warning, depend count for " + depend.getId() + "was not decremented " + + "for frame " + f + "because the count is " + "already 0."); + } + } + } + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatThisDependsOn(JobInterface job, DependTarget target) { + return dependDao.getWhatThisDependsOn(job, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatThisDependsOn(LayerInterface layer, + DependTarget target) { + return dependDao.getWhatThisDependsOn(layer, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatThisDependsOn(FrameInterface frame, + DependTarget target) { + return dependDao.getWhatThisDependsOn(frame, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LightweightDependency getCurrentDepend(String id) { + return dependDao.getDepend(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(JobInterface job) { + return dependDao.getWhatDependsOn(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(JobInterface job, DependTarget target) { + return dependDao.getWhatDependsOn(job, target); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(FrameInterface frame) { + return dependDao.getWhatDependsOn(frame); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(FrameInterface frame, boolean active) { + return dependDao.getWhatDependsOn(frame, active); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(LayerInterface layer) { + return dependDao.getWhatDependsOn(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getWhatDependsOn(LayerInterface layer, boolean active) { + return dependDao.getWhatDependsOn(layer, active); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void deleteDepend(LightweightDependency depend) { + dependDao.deleteDepend(depend); + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public DependDao getDependDao() { + return dependDao; + } + + public void setDependDao(DependDao workDao) { + this.dependDao = workDao; + } + + public FrameSearchFactory 
getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java index 67a11d712..211fdaf48 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java @@ -52,277 +52,277 @@ public class EmailSupport { - private MailSender mailSender; - private JobManager jobManager; - private String emailDomain; - private String emailFromAddress; - private String[] emailCcAddresses; - - private Map imageMap; - - private static final Logger logger = LogManager.getLogger(EmailSupport.class); - - @Autowired - public EmailSupport(Environment env) { - this.emailDomain = env.getProperty("email.domain", "opencue.io"); - this.emailFromAddress = env.getProperty("email.from.address", "opencue-noreply@opencue.io"); - this.emailCcAddresses = env.getProperty("email.cc.addresses", "").split(","); - } - - private static void loadImage(Map map, String path) { - InputStream is = null; - ByteArrayOutputStream os = null; - try { - // Try loading as classpath resource - is = EmailSupport.class.getResourceAsStream("/public/" + path); - - // Try loading as file (sbt-pack layout) - if (is == null) { - try { - is = new FileInputStream("public/" + path); - } catch (FileNotFoundException fnfe) { - // do nothing - } - } + private MailSender mailSender; + private JobManager jobManager; + private String emailDomain; + private String emailFromAddress; + private String[] emailCcAddresses; - // Try loading as file (unit tests don't have image paths loaded into classpath) - if (is == null) { - try { - is = new FileInputStream("conf/webapp/html/" + path); - } catch (FileNotFoundException fnfe) { - // do nothing - } - } - - // If neither loaded, throw an exception - if (is == null) { - throw new IOException("Unable to load"); - } - - // Read contents to byte array - os = new ByteArrayOutputStream(); - byte[] buffer = new byte[1024]; - int len; - while ((len = is.read(buffer)) != -1) { - os.write(buffer, 0, len); - } - byte[] data = os.toByteArray(); - - // Put in map - map.put(path, data); - } catch (IOException ioe) { - logger.error("Unable to read " + path, ioe); - } finally { - - // Close streams - if (os != null) { - try { - os.close(); - } catch (IOException ioe) { - logger.error("Unable to close buffer for " + path, ioe); - } - } - if (is != null) { + private Map imageMap; + + private static final Logger logger = LogManager.getLogger(EmailSupport.class); + + @Autowired + public EmailSupport(Environment env) { + this.emailDomain = env.getProperty("email.domain", "opencue.io"); + this.emailFromAddress = env.getProperty("email.from.address", "opencue-noreply@opencue.io"); + this.emailCcAddresses = env.getProperty("email.cc.addresses", "").split(","); + } + + private static void loadImage(Map map, String path) { + InputStream is = null; + ByteArrayOutputStream os = null; try { - is.close(); + // Try loading as classpath resource + is = EmailSupport.class.getResourceAsStream("/public/" + path); + + // Try loading as file (sbt-pack layout) + if (is == null) { + try { + is = new FileInputStream("public/" + path); + } catch (FileNotFoundException fnfe) { + // do nothing + } + } + + // Try loading as file (unit tests don't have image paths loaded into classpath) + if (is == 
null) { + try { + is = new FileInputStream("conf/webapp/html/" + path); + } catch (FileNotFoundException fnfe) { + // do nothing + } + } + + // If neither loaded, throw an exception + if (is == null) { + throw new IOException("Unable to load"); + } + + // Read contents to byte array + os = new ByteArrayOutputStream(); + byte[] buffer = new byte[1024]; + int len; + while ((len = is.read(buffer)) != -1) { + os.write(buffer, 0, len); + } + byte[] data = os.toByteArray(); + + // Put in map + map.put(path, data); } catch (IOException ioe) { - logger.error("Unable to load " + path, ioe); + logger.error("Unable to read " + path, ioe); + } finally { + + // Close streams + if (os != null) { + try { + os.close(); + } catch (IOException ioe) { + logger.error("Unable to close buffer for " + path, ioe); + } + } + if (is != null) { + try { + is.close(); + } catch (IOException ioe) { + logger.error("Unable to load " + path, ioe); + } + } } - } - } - } - - public void reportLaunchError(JobSpec spec, Throwable t) { - - SimpleMailMessage msg = new SimpleMailMessage(); - msg.setTo(String.format("%s@%s", spec.getUser(), this.emailDomain)); - msg.setFrom(this.emailFromAddress); - msg.setCc(this.emailCcAddresses); - msg.setSubject("Failed to launch OpenCue job."); - - StringBuilder sb = new StringBuilder(131072); - sb.append("This is an automatic message from cuebot that is sent"); - sb.append(" after a queued\njob has failed to launch. This usually"); - sb.append(" occurs if you have made a mistake\nediting an outline"); - sb.append(" script. If you have no idea why you are receiving\nthis"); - sb.append(" message and your jobs are not hitting the cue, please"); - sb.append(" open a\nhelpdesk ticket with the debugging information"); - sb.append(" provided below.\n\n"); - - sb.append("Failed to launch jobs:\n"); - for (BuildableJob job : spec.getJobs()) { - sb.append(job.detail.name); - sb.append("\n"); } - sb.append("\n\n"); - sb.append(new XMLOutputter(Format.getPrettyFormat()).outputString(spec.getDoc())); - sb.append("\n\n"); - sb.append(CueExceptionUtil.getStackTrace(t)); - - String body = sb.toString(); - msg.setText(body); - sendMessage(msg); - } - - public void reportJobComment(JobInterface job, CommentDetail c, String[] emails) { - - SimpleMailMessage msg = new SimpleMailMessage(); - msg.setTo(emails); - msg.setFrom(this.emailFromAddress); - msg.setSubject("New comment on " + job.getName()); - - StringBuilder sb = new StringBuilder(8096); - sb.append("Job: " + job.getName() + "\n"); - sb.append("User: " + c.user + "\n"); - sb.append("Subject: " + c.subject + "\n"); - sb.append("-----------------------------------------\n"); - sb.append(c.message); - - msg.setText(sb.toString()); - sendMessage(msg); - } - - public void sendMessage(SimpleMailMessage message) { - try { - mailSender.send(message); - } catch (MailException ex) { - logger.warn("Failed to send launch failure email, " + ex.getMessage()); + + public void reportLaunchError(JobSpec spec, Throwable t) { + + SimpleMailMessage msg = new SimpleMailMessage(); + msg.setTo(String.format("%s@%s", spec.getUser(), this.emailDomain)); + msg.setFrom(this.emailFromAddress); + msg.setCc(this.emailCcAddresses); + msg.setSubject("Failed to launch OpenCue job."); + + StringBuilder sb = new StringBuilder(131072); + sb.append("This is an automatic message from cuebot that is sent"); + sb.append(" after a queued\njob has failed to launch. This usually"); + sb.append(" occurs if you have made a mistake\nediting an outline"); + sb.append(" script. 
If you have no idea why you are receiving\nthis"); + sb.append(" message and your jobs are not hitting the cue, please"); + sb.append(" open a\nhelpdesk ticket with the debugging information"); + sb.append(" provided below.\n\n"); + + sb.append("Failed to launch jobs:\n"); + for (BuildableJob job : spec.getJobs()) { + sb.append(job.detail.name); + sb.append("\n"); + } + sb.append("\n\n"); + sb.append(new XMLOutputter(Format.getPrettyFormat()).outputString(spec.getDoc())); + sb.append("\n\n"); + sb.append(CueExceptionUtil.getStackTrace(t)); + + String body = sb.toString(); + msg.setText(body); + sendMessage(msg); } - } - public void sendShutdownEmail(JobInterface job) { + public void reportJobComment(JobInterface job, CommentDetail c, String[] emails) { + + SimpleMailMessage msg = new SimpleMailMessage(); + msg.setTo(emails); + msg.setFrom(this.emailFromAddress); + msg.setSubject("New comment on " + job.getName()); + + StringBuilder sb = new StringBuilder(8096); + sb.append("Job: " + job.getName() + "\n"); + sb.append("User: " + c.user + "\n"); + sb.append("Subject: " + c.subject + "\n"); + sb.append("-----------------------------------------\n"); + sb.append(c.message); - JobDetail d = jobManager.getJobDetail(job.getJobId()); - if (d.email == null) { - return; + msg.setText(sb.toString()); + sendMessage(msg); } - try { - - VelocityEngine ve = new VelocityEngine(); - ve.setProperty("resource.loader", "class"); - ve.setProperty("class.resource.loader.class", - "org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader"); - ve.init(); - - VelocityContext context = new VelocityContext(); - ExecutionSummary exj = jobManager.getExecutionSummary(job); - FrameStateTotals jts = jobManager.getFrameStateTotals(job); - - String status = ""; - if (jts.total != jts.succeeded) { - status = "Failed "; - } else { - status = "Succeeded "; - } - - context.put("jobName", d.name); - context.put("jobStatus", status.toUpperCase()); - context.put("deptName", d.deptName.toUpperCase()); - context.put("showName", d.showName.toUpperCase()); - context.put("totalLayers", d.totalLayers); - context.put("shotName", d.shot.toUpperCase()); - context.put("succeededFrames", jts.succeeded); - context.put("totalFrames", jts.total); - context.put("dependFrames", jts.depend); - context.put("deadFrames", jts.dead); - context.put("waitingFrames", jts.waiting); - context.put("eatenFrames", jts.eaten); - context.put("failedFrames", jts.dead + jts.eaten + jts.waiting); - context.put("checkpointFrames", jts.checkpoint); - context.put("maxRSS", - String.format(Locale.ROOT, "%.1fGB", exj.highMemoryKb / 1024.0 / 1024.0)); - context.put("coreTime", String.format(Locale.ROOT, "%.1f", exj.coreTime / 3600.0)); - - Template t = ve.getTemplate("/conf/webapp/html/email_template.html"); - - List layers = jobManager.getLayerDetails(job); - List layerStats = new ArrayList(layers.size()); - - boolean shouldCreateFile = false; - - Map map = new HashMap(); - loadImage(map, "opencue_logo.png"); - - for (LayerDetail layer : layers) { - if (layer.type.equals(LayerType.RENDER)) { - LayerStats stats = new LayerStats(); - stats.setDetail(layer); - stats.setExecutionSummary(jobManager.getExecutionSummary(layer)); - stats.setFrameStateTotals(jobManager.getFrameStateTotals(layer)); - stats.setThreadStats(jobManager.getThreadStats(layer)); - stats.setOutputs( - jobManager.getLayerOutputs(layer).stream().sorted().collect(Collectors.toList())); - layerStats.add(stats); - if (stats.getOutputs().size() > 3) - shouldCreateFile = true; - if 
(!layer.services.isEmpty()) - loadImage(map, "services/" + layer.services.toArray()[0] + ".png"); + public void sendMessage(SimpleMailMessage message) { + try { + mailSender.send(message); + } catch (MailException ex) { + logger.warn("Failed to send launch failure email, " + ex.getMessage()); } - } - - imageMap = Collections.unmodifiableMap(map); + } - context.put("layers", layerStats); + public void sendShutdownEmail(JobInterface job) { - StringWriter w = new StringWriter(); - t.merge(context, w); + JobDetail d = jobManager.getJobDetail(job.getJobId()); + if (d.email == null) { + return; + } - String subject = "OpenCue Job " + d.getName(); + try { - subject = status + subject; + VelocityEngine ve = new VelocityEngine(); + ve.setProperty("resource.loader", "class"); + ve.setProperty("class.resource.loader.class", + "org.apache.velocity.runtime.resource.loader.ClasspathResourceLoader"); + ve.init(); + + VelocityContext context = new VelocityContext(); + ExecutionSummary exj = jobManager.getExecutionSummary(job); + FrameStateTotals jts = jobManager.getFrameStateTotals(job); + + String status = ""; + if (jts.total != jts.succeeded) { + status = "Failed "; + } else { + status = "Succeeded "; + } - BufferedWriter output = null; - File file = null; - if (shouldCreateFile) { - try { - file = new File("my_outputs.txt"); - output = new BufferedWriter(new FileWriter(file)); - for (LayerDetail layer : layers) { - if (layer.type.equals(LayerType.RENDER)) { - List sortedNames = - jobManager.getLayerOutputs(layer).stream().sorted().collect(Collectors.toList()); - output.write(layer.name + "\n" + String.join("\n", sortedNames) + "\n"); + context.put("jobName", d.name); + context.put("jobStatus", status.toUpperCase()); + context.put("deptName", d.deptName.toUpperCase()); + context.put("showName", d.showName.toUpperCase()); + context.put("totalLayers", d.totalLayers); + context.put("shotName", d.shot.toUpperCase()); + context.put("succeededFrames", jts.succeeded); + context.put("totalFrames", jts.total); + context.put("dependFrames", jts.depend); + context.put("deadFrames", jts.dead); + context.put("waitingFrames", jts.waiting); + context.put("eatenFrames", jts.eaten); + context.put("failedFrames", jts.dead + jts.eaten + jts.waiting); + context.put("checkpointFrames", jts.checkpoint); + context.put("maxRSS", + String.format(Locale.ROOT, "%.1fGB", exj.highMemoryKb / 1024.0 / 1024.0)); + context.put("coreTime", String.format(Locale.ROOT, "%.1f", exj.coreTime / 3600.0)); + + Template t = ve.getTemplate("/conf/webapp/html/email_template.html"); + + List layers = jobManager.getLayerDetails(job); + List layerStats = new ArrayList(layers.size()); + + boolean shouldCreateFile = false; + + Map map = new HashMap(); + loadImage(map, "opencue_logo.png"); + + for (LayerDetail layer : layers) { + if (layer.type.equals(LayerType.RENDER)) { + LayerStats stats = new LayerStats(); + stats.setDetail(layer); + stats.setExecutionSummary(jobManager.getExecutionSummary(layer)); + stats.setFrameStateTotals(jobManager.getFrameStateTotals(layer)); + stats.setThreadStats(jobManager.getThreadStats(layer)); + stats.setOutputs(jobManager.getLayerOutputs(layer).stream().sorted() + .collect(Collectors.toList())); + layerStats.add(stats); + if (stats.getOutputs().size() > 3) + shouldCreateFile = true; + if (!layer.services.isEmpty()) + loadImage(map, "services/" + layer.services.toArray()[0] + ".png"); + } } - } - } catch (IOException e) { - e.printStackTrace(); - } finally { - if (output != null) { - try { - output.close(); - } catch 
(IOException e) { - e.printStackTrace(); + + imageMap = Collections.unmodifiableMap(map); + + context.put("layers", layerStats); + + StringWriter w = new StringWriter(); + t.merge(context, w); + + String subject = "OpenCue Job " + d.getName(); + + subject = status + subject; + + BufferedWriter output = null; + File file = null; + if (shouldCreateFile) { + try { + file = new File("my_outputs.txt"); + output = new BufferedWriter(new FileWriter(file)); + for (LayerDetail layer : layers) { + if (layer.type.equals(LayerType.RENDER)) { + List sortedNames = jobManager.getLayerOutputs(layer).stream() + .sorted().collect(Collectors.toList()); + output.write(layer.name + "\n" + String.join("\n", sortedNames) + "\n"); + } + } + } catch (IOException e) { + e.printStackTrace(); + } finally { + if (output != null) { + try { + output.close(); + } catch (IOException e) { + e.printStackTrace(); + } + } + } } - } - } - } - for (String email : d.email.split(",")) { - try { - CueUtil.sendmail(email, this.emailFromAddress, subject, new StringBuilder(w.toString()), - imageMap, file); + for (String email : d.email.split(",")) { + try { + CueUtil.sendmail(email, this.emailFromAddress, subject, + new StringBuilder(w.toString()), imageMap, file); + } catch (Exception e) { + // just log and eat if the mail server is down or something + // of that nature. + logger.info("Failed to send job complete mail, reason: " + e); + } + } } catch (Exception e) { - // just log and eat if the mail server is down or something - // of that nature. - logger.info("Failed to send job complete mail, reason: " + e); + e.printStackTrace(); + throw new SpcueRuntimeException("Failed " + e, e); } - } - } catch (Exception e) { - e.printStackTrace(); - throw new SpcueRuntimeException("Failed " + e, e); } - } - public JobManager getJobManager() { - return jobManager; - } + public JobManager getJobManager() { + return jobManager; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } - public void setMailSender(MailSender mailSender) { - this.mailSender = mailSender; - } + public void setMailSender(MailSender mailSender) { + this.mailSender = mailSender; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java index e8bcdc5de..0a6daf923 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManager.java @@ -28,57 +28,57 @@ public interface FilterManager { - void runFiltersOnJob(JobDetail job); + void runFiltersOnJob(JobDetail job); - void runFilterOnJob(FilterEntity filter, JobDetail job); + void runFilterOnJob(FilterEntity filter, JobDetail job); - void runFilterOnJob(FilterEntity filter, String id); + void runFilterOnJob(FilterEntity filter, String id); - void runFilterOnGroup(FilterEntity filter, GroupInterface group); + void runFilterOnGroup(FilterEntity filter, GroupInterface group); - void lowerFilterOrder(FilterInterface f); + void lowerFilterOrder(FilterInterface f); - void raiseFilterOrder(FilterInterface f); + void raiseFilterOrder(FilterInterface f); - void setFilterOrder(FilterInterface f, double order); + void setFilterOrder(FilterInterface f, double order); - void createFilter(FilterEntity filter); + void createFilter(FilterEntity filter); - void createAction(ActionEntity action); + void createAction(ActionEntity 
action); - void createMatcher(MatcherEntity action); + void createMatcher(MatcherEntity action); - void deleteFilter(FilterInterface f); + void deleteFilter(FilterInterface f); - void deleteAction(ActionInterface action); + void deleteAction(ActionInterface action); - void deleteMatcher(MatcherInterface matcher); + void deleteMatcher(MatcherInterface matcher); - void updateMatcher(MatcherEntity matcher); + void updateMatcher(MatcherEntity matcher); - void updateAction(ActionEntity action); + void updateAction(ActionEntity action); - FilterEntity getFilter(String id); + FilterEntity getFilter(String id); - MatcherEntity getMatcher(String id); + MatcherEntity getMatcher(String id); - ActionEntity getAction(String id); + ActionEntity getAction(String id); - FilterEntity getFilter(FilterInterface filter); + FilterEntity getFilter(FilterInterface filter); - MatcherEntity getMatcher(MatcherInterface matcher); + MatcherEntity getMatcher(MatcherInterface matcher); - ActionEntity getAction(ActionInterface action); + ActionEntity getAction(ActionInterface action); - boolean applyAction(ActionEntity action, JobDetail job); + boolean applyAction(ActionEntity action, JobDetail job); - boolean applyAction(ActionEntity action, JobDetail job, FilterManagerService.Context context); + boolean applyAction(ActionEntity action, JobDetail job, FilterManagerService.Context context); - boolean applyActions(List actions, JobDetail job, - FilterManagerService.Context context); + boolean applyActions(List actions, JobDetail job, + FilterManagerService.Context context); - boolean applyActions(List actions, JobDetail job); + boolean applyActions(List actions, JobDetail job); - public boolean isMatch(MatcherEntity matcher, JobDetail job); + public boolean isMatch(MatcherEntity matcher, JobDetail job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java index eec1b74ac..eb972bf16 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java @@ -59,495 +59,497 @@ @Transactional public class FilterManagerService implements FilterManager { - private static final Logger logger = LogManager.getLogger(FilterManagerService.class); - - private ActionDao actionDao; - private MatcherDao matcherDao; - private FilterDao filterDao; - private GroupDao groupDao; - private JobDao jobDao; - private LayerDao layerDao; - - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilter(FilterEntity filter) { - List jobs = jobDao.findJobs(filter); - for (JobDetail job : jobs) { - if (!match(filter, job)) { - continue; - } - applyActions(filter, job); - } - } - - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilterOnJob(FilterEntity filter, JobDetail job) { - if (match(filter, job)) { - applyActions(filter, job); - } - } - - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilterOnJob(FilterEntity filter, String id) { - JobDetail j = jobDao.getJobDetail(id); - if (match(filter, j)) { - applyActions(filter, j); - } - } - - @Transactional(propagation = Propagation.SUPPORTS) - public void runFilterOnGroup(FilterEntity filter, GroupInterface group) { - for (JobDetail job : jobDao.findJobs(group)) { - if (match(filter, job)) { - applyActions(filter, job); - } - } - } - - @Transactional(propagation = Propagation.SUPPORTS) - public void filterShow(ShowInterface show) { - - 
List filters = filterDao.getActiveFilters(show); - List jobs = jobDao.findJobs(show); - - for (JobDetail job : jobs) { - for (FilterEntity filter : filters) { - if (!match(filter, job)) { - continue; + private static final Logger logger = LogManager.getLogger(FilterManagerService.class); + + private ActionDao actionDao; + private MatcherDao matcherDao; + private FilterDao filterDao; + private GroupDao groupDao; + private JobDao jobDao; + private LayerDao layerDao; + + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilter(FilterEntity filter) { + List jobs = jobDao.findJobs(filter); + for (JobDetail job : jobs) { + if (!match(filter, job)) { + continue; + } + applyActions(filter, job); } - boolean stopProcessing = applyActions(filter, job); - if (stopProcessing) { - break; + } + + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilterOnJob(FilterEntity filter, JobDetail job) { + if (match(filter, job)) { + applyActions(filter, job); } - } - } - } - - public void deleteFilter(FilterInterface f) { - filterDao.deleteFilter(f); - } - - public void lowerFilterOrder(FilterInterface f) { - filterDao.lowerFilterOrder(f, 1); - } - - public void raiseFilterOrder(FilterInterface f) { - filterDao.raiseFilterOrder(f, 1); - } - - public void setFilterOrder(FilterInterface f, double order) { - filterDao.updateSetFilterOrder(f, order); - } - - public void createAction(ActionEntity action) { - actionDao.createAction(action); - } - - public void createMatcher(MatcherEntity matcher) { - matcherDao.insertMatcher(matcher); - } - - /** - * Stores what options have already been set by other filers. Will need to extend this later to - * handle jobs running through different filers. - */ - public class Context { - - public static final int SET_MIN_CORES = 1; - public static final int SET_MAX_CORES = 2; - public static final int SET_PRIORITY = 4; - - int props = 0; - - public void setProperty(int value) { - if ((props & value) != value) { - props = props + value; - } - } - - public boolean isSet(int value) { - return (props & value) == value; - } - } - - /** - * Take a new job detail and run it though the show's filters, setting the groupId property. 
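The Context inner class shown above is a small bit field: each SET_* constant is a distinct power of two, setProperty() adds the bit in (the guard makes a repeated call a no-op), and isSet() tests it. An illustrative sketch of how the action code relies on it, written as it would appear inside FilterManagerService where Context is in scope:

    // Illustrative only: record that a filter already set the job's priority so
    // a later MOVE_JOB_TO_GROUP action does not overwrite it with the group's.
    Context context = new Context();
    context.setProperty(Context.SET_PRIORITY);   // props = 4
    context.setProperty(Context.SET_PRIORITY);   // still 4; the guard prevents double-adding
    context.setProperty(Context.SET_MAX_CORES);  // props = 4 + 2 = 6 (same as setting the bit)

    boolean prioritySet = context.isSet(Context.SET_PRIORITY);  // true
    boolean minCoresSet = context.isSet(Context.SET_MIN_CORES); // false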
- * - * @param job - */ - @Transactional(propagation = Propagation.SUPPORTS) - public void runFiltersOnJob(JobDetail job) { - Context context = new Context(); - List filters = filterDao.getActiveFilters(job); - for (FilterEntity filter : filters) { - if (match(filter, job)) { - boolean stop_filters = applyActions(filter, job, context); - if (stop_filters) { - break; + } + + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilterOnJob(FilterEntity filter, String id) { + JobDetail j = jobDao.getJobDetail(id); + if (match(filter, j)) { + applyActions(filter, j); } - } } - } - public boolean applyActions(List actions, JobDetail job, Context context) { - for (ActionEntity action : actions) { - applyAction(action, job, context); - if (action.type.equals(ActionType.STOP_PROCESSING)) { - return true; - } + @Transactional(propagation = Propagation.SUPPORTS) + public void runFilterOnGroup(FilterEntity filter, GroupInterface group) { + for (JobDetail job : jobDao.findJobs(group)) { + if (match(filter, job)) { + applyActions(filter, job); + } + } } - return false; - } - public boolean applyActions(List actions, JobDetail job) { - return applyActions(actions, job, new Context()); - } + @Transactional(propagation = Propagation.SUPPORTS) + public void filterShow(ShowInterface show) { + + List filters = filterDao.getActiveFilters(show); + List jobs = jobDao.findJobs(show); + + for (JobDetail job : jobs) { + for (FilterEntity filter : filters) { + if (!match(filter, job)) { + continue; + } + boolean stopProcessing = applyActions(filter, job); + if (stopProcessing) { + break; + } + } + } + } - public boolean applyActions(FilterEntity filter, JobDetail job) { - return applyActions(filter, job, new Context()); - } + public void deleteFilter(FilterInterface f) { + filterDao.deleteFilter(f); + } - public boolean applyActions(FilterEntity filter, JobDetail job, Context context) { - return applyActions(actionDao.getActions(filter), job, context); - } + public void lowerFilterOrder(FilterInterface f) { + filterDao.lowerFilterOrder(f, 1); + } - private boolean isMatch(final MatcherEntity matcher, final String... 
inputs) { - boolean isMatch = false; + public void raiseFilterOrder(FilterInterface f) { + filterDao.raiseFilterOrder(f, 1); + } - switch (matcher.type) { - case CONTAINS: - for (String s : inputs) { - isMatch = s.contains(matcher.value); - if (isMatch) - break; - } - break; - case DOES_NOT_CONTAIN: - for (String s : inputs) { - isMatch = s.contains(matcher.value); - if (isMatch) - return false; - } - isMatch = true; - break; - case IS: - for (String s : inputs) { - isMatch = s.equals(matcher.value); - if (isMatch) - break; - } - break; - case IS_NOT: - for (String s : inputs) { - isMatch = s.equals(matcher.value); - if (isMatch) - return false; - } - isMatch = true; - break; - case BEGINS_WITH: - for (String s : inputs) { - isMatch = s.startsWith(matcher.value); - if (isMatch) - break; - } - break; - case ENDS_WITH: - for (String s : inputs) { - isMatch = s.endsWith(matcher.value); - if (isMatch) - break; - } - break; - case REGEX: - Pattern pattern = null; - try { - pattern = Pattern.compile(matcher.value); - } catch (Exception e) { - return false; - } + public void setFilterOrder(FilterInterface f, double order) { + filterDao.updateSetFilterOrder(f, order); + } - for (String s : inputs) { - isMatch = pattern.matcher(s).find(); - if (isMatch) - break; - } - break; + public void createAction(ActionEntity action) { + actionDao.createAction(action); } - return isMatch; - } - public boolean isMatch(MatcherEntity matcher, JobDetail job) { + public void createMatcher(MatcherEntity matcher) { + matcherDao.insertMatcher(matcher); + } - String input = null; + /** + * Stores what options have already been set by other filers. Will need to extend this later to + * handle jobs running through different filers. + */ + public class Context { - switch (matcher.subject) { - case SERVICE_NAME: { - List layers = layerDao.getLayerDetails(job); - List serviceNames = new ArrayList(layers.size()); - for (LayerDetail layer : layers) { - for (String service : layer.services) { - serviceNames.add(service); - } - } + public static final int SET_MIN_CORES = 1; + public static final int SET_MAX_CORES = 2; + public static final int SET_PRIORITY = 4; - return isMatch(matcher, serviceNames.toArray(new String[0])); - } - case LAYER_NAME: { - List layers = layerDao.getLayerDetails(job); - List layerNames = new ArrayList(layers.size()); - for (LayerDetail layer : layers) { - layerNames.add(layer.name); - } + int props = 0; - return isMatch(matcher, layerNames.toArray(new String[0])); - } - default: { - switch (matcher.subject) { - case JOB_NAME: - input = job.getName().toLowerCase(); - break; - case SHOW: - input = job.showName.toLowerCase(); - break; - case SHOT: - input = job.shot.toLowerCase(); - break; - case USER: - input = job.user.toLowerCase(); - break; - case PRIORITY: - input = Integer.toString(job.priority); - break; - case FACILITY: - if (job.facilityName == null) { - return false; + public void setProperty(int value) { + if ((props & value) != value) { + props = props + value; } - input = job.facilityName.toLowerCase(); - break; - default: - input = ""; } - return isMatch(matcher, input); - } + public boolean isSet(int value) { + return (props & value) == value; + } } - } - - public boolean applyAction(ActionEntity action, JobDetail job) { - return applyAction(action, job, new Context()); - } - - public boolean applyAction(ActionEntity action, JobDetail job, Context context) { - boolean stopProcessing = false; /** - * All of these actions can be handled by the call to updateJob which happens later on. 
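One subtlety of the matcher switch above: the negative types are evaluated across all inputs, so DOES_NOT_CONTAIN and IS_NOT return false as soon as any input matches and only report true when none do. A small illustrative sketch with hypothetical values:

    // Illustrative only: DOES_NOT_CONTAIN over the layer names of a job.
    String value = "preview";                               // hypothetical matcher.value
    String[] inputs = {"render_beauty", "render_preview"};  // hypothetical layer names

    boolean isMatch = true;
    for (String s : inputs) {
        if (s.contains(value)) {  // any single hit defeats the matcher
            isMatch = false;
            break;
        }
    }
    // isMatch == false here; with inputs {"render_beauty", "comp"} it would be true.
    // REGEX is similarly forgiving: an invalid pattern simply makes the matcher
    // report false rather than throwing.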
All - * other actions are handlded in applyAction + * Take a new job detail and run it though the show's filters, setting the groupId property. + * + * @param job */ - switch (action.type) { - case PAUSE_JOB: - jobDao.updatePaused(job, action.booleanValue); - break; - - case SET_JOB_MIN_CORES: - context.setProperty(Context.SET_MIN_CORES); - jobDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue)); - break; - - case SET_JOB_MAX_CORES: - context.setProperty(Context.SET_MAX_CORES); - jobDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue)); - break; - - case SET_JOB_PRIORITY: - context.setProperty(Context.SET_PRIORITY); - int priority = (int) action.intValue; - jobDao.updatePriority(job, priority); - job.priority = priority; - break; - - case MOVE_JOB_TO_GROUP: - // Just ignore this if the groupValue is null. The job will launch - // and it can be moved to the right group manually. - if (action.groupValue == null) { - logger.error("Did not move job to group, the group value was not valid."); - break; + @Transactional(propagation = Propagation.SUPPORTS) + public void runFiltersOnJob(JobDetail job) { + Context context = new Context(); + List filters = filterDao.getActiveFilters(job); + for (FilterEntity filter : filters) { + if (match(filter, job)) { + boolean stop_filters = applyActions(filter, job, context); + if (stop_filters) { + break; + } + } } + } - GroupDetail g = groupDao.getGroupDetail(action.groupValue); - List inherits = new ArrayList(3); - - // Do not set these values from the group if they were set by another filter. - if (!context.isSet(Context.SET_PRIORITY) && g.jobPriority != -1) { - inherits.add(Inherit.Priority); - } - if (!context.isSet(Context.SET_MAX_CORES) && g.jobMaxCores != -1) { - inherits.add(Inherit.MaxCores); - } - if (!context.isSet(Context.SET_MIN_CORES) && g.jobMinCores != -1) { - inherits.add(Inherit.MinCores); + public boolean applyActions(List actions, JobDetail job, Context context) { + for (ActionEntity action : actions) { + applyAction(action, job, context); + if (action.type.equals(ActionType.STOP_PROCESSING)) { + return true; + } } + return false; + } - logger.info("moving job into group: " + g.name); - jobDao.updateParent(job, g, inherits.toArray(new Inherit[0])); - break; - - case SET_ALL_RENDER_LAYER_TAGS: - layerDao.updateTags(job, action.stringValue, LayerType.RENDER); - break; - - case SET_ALL_RENDER_LAYER_MEMORY: - layerDao.updateMinMemory(job, (int) action.intValue, LayerType.RENDER); - break; + public boolean applyActions(List actions, JobDetail job) { + return applyActions(actions, job, new Context()); + } - case SET_ALL_RENDER_LAYER_MIN_CORES: - layerDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); - break; + public boolean applyActions(FilterEntity filter, JobDetail job) { + return applyActions(filter, job, new Context()); + } - case SET_ALL_RENDER_LAYER_MAX_CORES: - layerDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); - break; + public boolean applyActions(FilterEntity filter, JobDetail job, Context context) { + return applyActions(actionDao.getActions(filter), job, context); + } - case SET_MEMORY_OPTIMIZER: - List layers = layerDao.getLayers(job); - for (LayerInterface layer : layers) { - layerDao.enableMemoryOptimizer(layer, action.booleanValue); + private boolean isMatch(final MatcherEntity matcher, final String... 
inputs) { + boolean isMatch = false; + + switch (matcher.type) { + case CONTAINS: + for (String s : inputs) { + isMatch = s.contains(matcher.value); + if (isMatch) + break; + } + break; + case DOES_NOT_CONTAIN: + for (String s : inputs) { + isMatch = s.contains(matcher.value); + if (isMatch) + return false; + } + isMatch = true; + break; + case IS: + for (String s : inputs) { + isMatch = s.equals(matcher.value); + if (isMatch) + break; + } + break; + case IS_NOT: + for (String s : inputs) { + isMatch = s.equals(matcher.value); + if (isMatch) + return false; + } + isMatch = true; + break; + case BEGINS_WITH: + for (String s : inputs) { + isMatch = s.startsWith(matcher.value); + if (isMatch) + break; + } + break; + case ENDS_WITH: + for (String s : inputs) { + isMatch = s.endsWith(matcher.value); + if (isMatch) + break; + } + break; + case REGEX: + Pattern pattern = null; + try { + pattern = Pattern.compile(matcher.value); + } catch (Exception e) { + return false; + } + + for (String s : inputs) { + isMatch = pattern.matcher(s).find(); + if (isMatch) + break; + } + break; } - break; - - default: - stopProcessing = true; - break; + return isMatch; } - return stopProcessing; - } + public boolean isMatch(MatcherEntity matcher, JobDetail job) { - private boolean match(FilterEntity filter, JobDetail job) { + String input = null; - int numMatched = 0; - int numMatchesRequired = 1; + switch (matcher.subject) { + case SERVICE_NAME: { + List layers = layerDao.getLayerDetails(job); + List serviceNames = new ArrayList(layers.size()); + for (LayerDetail layer : layers) { + for (String service : layer.services) { + serviceNames.add(service); + } + } + + return isMatch(matcher, serviceNames.toArray(new String[0])); + } + case LAYER_NAME: { + List layers = layerDao.getLayerDetails(job); + List layerNames = new ArrayList(layers.size()); + for (LayerDetail layer : layers) { + layerNames.add(layer.name); + } + + return isMatch(matcher, layerNames.toArray(new String[0])); + } + default: { + switch (matcher.subject) { + case JOB_NAME: + input = job.getName().toLowerCase(); + break; + case SHOW: + input = job.showName.toLowerCase(); + break; + case SHOT: + input = job.shot.toLowerCase(); + break; + case USER: + input = job.user.toLowerCase(); + break; + case PRIORITY: + input = Integer.toString(job.priority); + break; + case FACILITY: + if (job.facilityName == null) { + return false; + } + input = job.facilityName.toLowerCase(); + break; + default: + input = ""; + } + + return isMatch(matcher, input); + } + } + } - List matchers = matcherDao.getMatchers(filter); - if (matchers.size() == 0) { - return false; + public boolean applyAction(ActionEntity action, JobDetail job) { + return applyAction(action, job, new Context()); } - if (filter.type.equals(FilterType.MATCH_ALL)) { - numMatchesRequired = matchers.size(); + public boolean applyAction(ActionEntity action, JobDetail job, Context context) { + + boolean stopProcessing = false; + /** + * All of these actions can be handled by the call to updateJob which happens later on. 
All + * other actions are handlded in applyAction + */ + switch (action.type) { + case PAUSE_JOB: + jobDao.updatePaused(job, action.booleanValue); + break; + + case SET_JOB_MIN_CORES: + context.setProperty(Context.SET_MIN_CORES); + jobDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue)); + break; + + case SET_JOB_MAX_CORES: + context.setProperty(Context.SET_MAX_CORES); + jobDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue)); + break; + + case SET_JOB_PRIORITY: + context.setProperty(Context.SET_PRIORITY); + int priority = (int) action.intValue; + jobDao.updatePriority(job, priority); + job.priority = priority; + break; + + case MOVE_JOB_TO_GROUP: + // Just ignore this if the groupValue is null. The job will launch + // and it can be moved to the right group manually. + if (action.groupValue == null) { + logger.error("Did not move job to group, the group value was not valid."); + break; + } + + GroupDetail g = groupDao.getGroupDetail(action.groupValue); + List inherits = new ArrayList(3); + + // Do not set these values from the group if they were set by another filter. + if (!context.isSet(Context.SET_PRIORITY) && g.jobPriority != -1) { + inherits.add(Inherit.Priority); + } + if (!context.isSet(Context.SET_MAX_CORES) && g.jobMaxCores != -1) { + inherits.add(Inherit.MaxCores); + } + if (!context.isSet(Context.SET_MIN_CORES) && g.jobMinCores != -1) { + inherits.add(Inherit.MinCores); + } + + logger.info("moving job into group: " + g.name); + jobDao.updateParent(job, g, inherits.toArray(new Inherit[0])); + break; + + case SET_ALL_RENDER_LAYER_TAGS: + layerDao.updateTags(job, action.stringValue, LayerType.RENDER); + break; + + case SET_ALL_RENDER_LAYER_MEMORY: + layerDao.updateMinMemory(job, (int) action.intValue, LayerType.RENDER); + break; + + case SET_ALL_RENDER_LAYER_MIN_CORES: + layerDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue), + LayerType.RENDER); + break; + + case SET_ALL_RENDER_LAYER_MAX_CORES: + layerDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue), + LayerType.RENDER); + break; + + case SET_MEMORY_OPTIMIZER: + List layers = layerDao.getLayers(job); + for (LayerInterface layer : layers) { + layerDao.enableMemoryOptimizer(layer, action.booleanValue); + } + break; + + default: + stopProcessing = true; + break; + } + + return stopProcessing; } - for (MatcherEntity matcher : matchers) { - boolean itMatches = isMatch(matcher, job); + private boolean match(FilterEntity filter, JobDetail job) { + + int numMatched = 0; + int numMatchesRequired = 1; + + List matchers = matcherDao.getMatchers(filter); + if (matchers.size() == 0) { + return false; + } - if (!itMatches) { if (filter.type.equals(FilterType.MATCH_ALL)) { - break; + numMatchesRequired = matchers.size(); } - } else { - numMatched++; - if (filter.type.equals(FilterType.MATCH_ANY)) { - break; + + for (MatcherEntity matcher : matchers) { + boolean itMatches = isMatch(matcher, job); + + if (!itMatches) { + if (filter.type.equals(FilterType.MATCH_ALL)) { + break; + } + } else { + numMatched++; + if (filter.type.equals(FilterType.MATCH_ANY)) { + break; + } + } } - } - } - if (numMatched == numMatchesRequired) { - return true; - } + if (numMatched == numMatchesRequired) { + return true; + } - return false; - } + return false; + } - public FilterDao getFilterDao() { - return filterDao; - } + public FilterDao getFilterDao() { + return filterDao; + } - public void setFilterDao(FilterDao filterDao) { - this.filterDao = filterDao; - } + public void setFilterDao(FilterDao 
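To make the counting in match() concrete, a worked example with a hypothetical filter:

    // Hypothetical MATCH_ALL filter with three matchers; numMatchesRequired = 3.
    //   matcher 1: SHOW IS "myshow"          -> hit   (numMatched = 1)
    //   matcher 2: USER BEGINS_WITH "td_"    -> hit   (numMatched = 2)
    //   matcher 3: LAYER_NAME CONTAINS "fx"  -> miss  -> loop breaks early
    // numMatched (2) != numMatchesRequired (3), so match() returns false and no
    // actions run. A MATCH_ANY filter with the same matchers would have returned
    // true right after matcher 1, since numMatchesRequired stays at 1.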
filterDao) { + this.filterDao = filterDao; + } - public GroupDao getGroupDao() { - return groupDao; - } + public GroupDao getGroupDao() { + return groupDao; + } - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } - public void deleteAction(ActionInterface action) { - actionDao.deleteAction(action); - } + public void deleteAction(ActionInterface action) { + actionDao.deleteAction(action); + } - public void deleteMatcher(MatcherInterface matcher) { - matcherDao.deleteMatcher(matcher); - } + public void deleteMatcher(MatcherInterface matcher) { + matcherDao.deleteMatcher(matcher); + } - public ActionEntity getAction(String id) { - return actionDao.getAction(id); - } + public ActionEntity getAction(String id) { + return actionDao.getAction(id); + } - public ActionEntity getAction(ActionInterface action) { - return actionDao.getAction(action); - } + public ActionEntity getAction(ActionInterface action) { + return actionDao.getAction(action); + } - public FilterEntity getFilter(String id) { - return filterDao.getFilter(id); - } + public FilterEntity getFilter(String id) { + return filterDao.getFilter(id); + } - public FilterEntity getFilter(FilterInterface filter) { - return filterDao.getFilter(filter); - } + public FilterEntity getFilter(FilterInterface filter) { + return filterDao.getFilter(filter); + } - public MatcherEntity getMatcher(String id) { - return matcherDao.getMatcher(id); - } + public MatcherEntity getMatcher(String id) { + return matcherDao.getMatcher(id); + } - public MatcherEntity getMatcher(MatcherInterface matcher) { - return matcherDao.getMatcher(matcher); - } + public MatcherEntity getMatcher(MatcherInterface matcher) { + return matcherDao.getMatcher(matcher); + } - public void updateAction(ActionEntity action) { - actionDao.updateAction(action); - } + public void updateAction(ActionEntity action) { + actionDao.updateAction(action); + } - public void updateMatcher(MatcherEntity matcher) { - matcherDao.updateMatcher(matcher); - } + public void updateMatcher(MatcherEntity matcher) { + matcherDao.updateMatcher(matcher); + } - public void createFilter(FilterEntity filter) { - filterDao.insertFilter(filter); - } + public void createFilter(FilterEntity filter) { + filterDao.insertFilter(filter); + } - public ActionDao getActionDao() { - return actionDao; - } + public ActionDao getActionDao() { + return actionDao; + } - public void setActionDao(ActionDao actionDao) { - this.actionDao = actionDao; - } + public void setActionDao(ActionDao actionDao) { + this.actionDao = actionDao; + } - public JobDao getJobDao() { - return jobDao; - } + public JobDao getJobDao() { + return jobDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public MatcherDao getMatcherDao() { - return matcherDao; - } + public MatcherDao getMatcherDao() { + return matcherDao; + } - public void setMatcherDao(MatcherDao matcherDao) { - this.matcherDao = matcherDao; - } + public void setMatcherDao(MatcherDao matcherDao) { + this.matcherDao = matcherDao; + } - public LayerDao getLayerDao() { - return layerDao; - } + public LayerDao getLayerDao() { + return layerDao; + } - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java 
b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java index 15a9eaa60..06289ec50 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java @@ -26,92 +26,92 @@ public interface GroupManager { - void setGroupMaxCores(GroupInterface g, int coreUnits); - - void setGroupMinCores(GroupInterface g, int coreUnits); - - void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits); - - void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits); - - void setGroupMaxGpus(GroupInterface g, int gpuUnits); - - void setGroupMinGpus(GroupInterface g, int gpuUnits); - - void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits); - - void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits); - - void setGroupDefaultJobPriority(GroupInterface g, int priority); - - /** - * Return the group from its unique ID - * - * @param id - * @return - */ - GroupInterface getGroup(String id); - - /** - * Return the root group for the specified show. - * - * @param s - * @return - */ - GroupDetail getRootGroupDetail(ShowInterface s); - - /** - * Return the GroupDetail by job. - * - * @param j - * @return - */ - GroupDetail getGroupDetail(JobInterface j); - - /** - * Return a GroupDetail from its unique ID - * - * @param id - * @return - */ - GroupDetail getGroupDetail(String id); - - void setGroupParent(GroupInterface group, GroupInterface newParent); - - void deleteGroup(GroupInterface group); - - void createGroup(GroupDetail group, GroupInterface parent); - - /** - * Re-parent a job to the specified group. - * - * @param job - * @param group - * @param inherit - */ - void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit); - - /** - * Parents a list of groups to the specified group - * - * @param group - * @param groups - */ - void reparentGroups(GroupInterface group, List groups); - - /** - * Re-parent a list of unique group IDs. - * - * @param group - * @param groups - */ - void reparentGroupIds(GroupInterface group, List groups); - - /** - * Sets the group's department all all jobs in that group to the new department. - * - * @param group - * @param d - */ - void setGroupDepartment(GroupInterface group, DepartmentInterface d); + void setGroupMaxCores(GroupInterface g, int coreUnits); + + void setGroupMinCores(GroupInterface g, int coreUnits); + + void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits); + + void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits); + + void setGroupMaxGpus(GroupInterface g, int gpuUnits); + + void setGroupMinGpus(GroupInterface g, int gpuUnits); + + void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits); + + void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits); + + void setGroupDefaultJobPriority(GroupInterface g, int priority); + + /** + * Return the group from its unique ID + * + * @param id + * @return + */ + GroupInterface getGroup(String id); + + /** + * Return the root group for the specified show. + * + * @param s + * @return + */ + GroupDetail getRootGroupDetail(ShowInterface s); + + /** + * Return the GroupDetail by job. 
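A usage sketch of the re-parenting calls in this interface (the group id is hypothetical; the job and groupIds variables are assumed to be in scope):

    // Sketch only: move a job into a destination group and let it inherit the
    // group's core limits, then re-parent a batch of groups by unique id.
    GroupDetail destination =
            groupManager.getGroupDetail("c0ffee00-0000-0000-0000-000000000001"); // hypothetical id
    groupManager.reparentJob(job, destination,
            new Inherit[] {Inherit.MinCores, Inherit.MaxCores});

    groupManager.reparentGroupIds(destination, groupIds); // groupIds: list of unique group ids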
+ * + * @param j + * @return + */ + GroupDetail getGroupDetail(JobInterface j); + + /** + * Return a GroupDetail from its unique ID + * + * @param id + * @return + */ + GroupDetail getGroupDetail(String id); + + void setGroupParent(GroupInterface group, GroupInterface newParent); + + void deleteGroup(GroupInterface group); + + void createGroup(GroupDetail group, GroupInterface parent); + + /** + * Re-parent a job to the specified group. + * + * @param job + * @param group + * @param inherit + */ + void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit); + + /** + * Parents a list of groups to the specified group + * + * @param group + * @param groups + */ + void reparentGroups(GroupInterface group, List groups); + + /** + * Re-parent a list of unique group IDs. + * + * @param group + * @param groups + */ + void reparentGroupIds(GroupInterface group, List groups); + + /** + * Sets the group's department all all jobs in that group to the new department. + * + * @param group + * @param d + */ + void setGroupDepartment(GroupInterface group, DepartmentInterface d); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java index c06e09dfb..b969839de 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java @@ -34,181 +34,181 @@ @Transactional public class GroupManagerService implements GroupManager { - private GroupDao groupDao; + private GroupDao groupDao; - private JobDao jobDao; + private JobDao jobDao; - private DepartmentDao departmentDao; + private DepartmentDao departmentDao; - private DepartmentManager departmentManager; - - @Override - public void setGroupDefaultJobPriority(GroupInterface g, int priority) { - groupDao.updateDefaultJobPriority(g, priority); - jobDao.updatePriority(g, priority); - } - - @Override - public void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits) { - groupDao.updateDefaultJobMaxCores(g, coreUnits); - if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMaxCores(g, coreUnits); - } - } - - @Override - public void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits) { - groupDao.updateDefaultJobMinCores(g, coreUnits); - if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMinCores(g, coreUnits); - } - } - - @Override - public void setGroupMaxCores(GroupInterface g, int coreUnits) { - groupDao.updateMaxCores(g, coreUnits); - } - - @Override - public void setGroupMinCores(GroupInterface g, int coreUnits) { - groupDao.updateMinCores(g, coreUnits); - } - - @Override - public void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits) { - groupDao.updateDefaultJobMaxGpus(g, gpuUnits); - if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMaxGpus(g, gpuUnits); - } - } - - @Override - public void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits) { - groupDao.updateDefaultJobMinGpus(g, gpuUnits); - if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { - jobDao.updateMinGpus(g, gpuUnits); - } - } - - @Override - public void setGroupMaxGpus(GroupInterface g, int gpuUnits) { - groupDao.updateMaxGpus(g, gpuUnits); - } - - @Override - public void setGroupMinGpus(GroupInterface g, int gpuUnits) { - groupDao.updateMinGpus(g, gpuUnits); - } - - @Override - public void setGroupParent(GroupInterface group, 
GroupInterface newParent) { - groupDao.updateGroupParent(group, newParent); - } - - @Override - public void deleteGroup(GroupInterface group) { - groupDao.deleteGroup(group); - } - - @Override - public void createGroup(GroupDetail group, GroupInterface parent) { - DepartmentInterface d; - if (group.getDepartmentId() == null) { - d = departmentDao.getDefaultDepartment(); - group.deptId = d.getId(); - } else { - d = departmentDao.getDepartment(group.getDepartmentId()); - } - groupDao.insertGroup(group, parent); - - if (!departmentManager.departmentConfigExists(group, d)) { - departmentManager.createDepartmentConfig(group, d); - } - } - - @Override - public void reparentGroups(GroupInterface group, List groups) { - for (GroupInterface g : groups) { - groupDao.updateGroupParent(g, group); - } - } - - @Override - public void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit) { - jobDao.updateParent(job, group, inherit); - } - - @Override - public void reparentGroupIds(GroupInterface group, List groups) { - reparentGroups(group, groupDao.getGroups(groups)); - } - - @Override - public void setGroupDepartment(GroupInterface group, DepartmentInterface dept) { - /* - * If this is the first time the show is using this department a department configuration is - * created. - */ - if (!departmentManager.departmentConfigExists(group, dept)) { - departmentManager.createDepartmentConfig(group, dept); - } - groupDao.updateDepartment(group, dept); - jobDao.updateDepartment(group, dept); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public GroupInterface getGroup(String id) { - return groupDao.getGroup(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public GroupDetail getGroupDetail(String id) { - return groupDao.getGroupDetail(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public GroupDetail getRootGroupDetail(ShowInterface s) { - return groupDao.getRootGroupDetail(s); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public GroupDetail getGroupDetail(JobInterface j) { - return groupDao.getGroupDetail(j); - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } - - public DepartmentDao getDepartmentDao() { - return departmentDao; - } - - public void setDepartmentDao(DepartmentDao departmentDao) { - this.departmentDao = departmentDao; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } + private DepartmentManager departmentManager; + + @Override + public void setGroupDefaultJobPriority(GroupInterface g, int priority) { + groupDao.updateDefaultJobPriority(g, priority); + jobDao.updatePriority(g, priority); + } + + @Override + public void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits) { + groupDao.updateDefaultJobMaxCores(g, coreUnits); + if (coreUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMaxCores(g, coreUnits); + } + } + + @Override + public void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits) { + groupDao.updateDefaultJobMinCores(g, coreUnits); + if (coreUnits != CueUtil.FEATURE_DISABLED && 
!groupDao.isManaged(g)) { + jobDao.updateMinCores(g, coreUnits); + } + } + + @Override + public void setGroupMaxCores(GroupInterface g, int coreUnits) { + groupDao.updateMaxCores(g, coreUnits); + } + + @Override + public void setGroupMinCores(GroupInterface g, int coreUnits) { + groupDao.updateMinCores(g, coreUnits); + } + + @Override + public void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits) { + groupDao.updateDefaultJobMaxGpus(g, gpuUnits); + if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMaxGpus(g, gpuUnits); + } + } + + @Override + public void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits) { + groupDao.updateDefaultJobMinGpus(g, gpuUnits); + if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMinGpus(g, gpuUnits); + } + } + + @Override + public void setGroupMaxGpus(GroupInterface g, int gpuUnits) { + groupDao.updateMaxGpus(g, gpuUnits); + } + + @Override + public void setGroupMinGpus(GroupInterface g, int gpuUnits) { + groupDao.updateMinGpus(g, gpuUnits); + } + + @Override + public void setGroupParent(GroupInterface group, GroupInterface newParent) { + groupDao.updateGroupParent(group, newParent); + } + + @Override + public void deleteGroup(GroupInterface group) { + groupDao.deleteGroup(group); + } + + @Override + public void createGroup(GroupDetail group, GroupInterface parent) { + DepartmentInterface d; + if (group.getDepartmentId() == null) { + d = departmentDao.getDefaultDepartment(); + group.deptId = d.getId(); + } else { + d = departmentDao.getDepartment(group.getDepartmentId()); + } + groupDao.insertGroup(group, parent); + + if (!departmentManager.departmentConfigExists(group, d)) { + departmentManager.createDepartmentConfig(group, d); + } + } + + @Override + public void reparentGroups(GroupInterface group, List groups) { + for (GroupInterface g : groups) { + groupDao.updateGroupParent(g, group); + } + } + + @Override + public void reparentJob(JobInterface job, GroupDetail group, Inherit[] inherit) { + jobDao.updateParent(job, group, inherit); + } + + @Override + public void reparentGroupIds(GroupInterface group, List groups) { + reparentGroups(group, groupDao.getGroups(groups)); + } + + @Override + public void setGroupDepartment(GroupInterface group, DepartmentInterface dept) { + /* + * If this is the first time the show is using this department a department configuration is + * created. 
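createGroup above backfills the department: when the GroupDetail carries no department id it is pointed at departmentDao.getDefaultDepartment(), and a department configuration is created for the show on first use. A minimal sketch, assuming GroupDetail can be built with its default constructor like the other *Detail entities:

    // Sketch only: create a sub-group without naming a department; the service
    // fills in group.deptId from the default department before inserting, and
    // creates the department config if the show has never used it.
    GroupDetail group = new GroupDetail();
    group.name = "lighting";                 // hypothetical; deptId intentionally left null

    groupManager.createGroup(group, parent); // parent: an existing GroupInterface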
+ */ + if (!departmentManager.departmentConfigExists(group, dept)) { + departmentManager.createDepartmentConfig(group, dept); + } + groupDao.updateDepartment(group, dept); + jobDao.updateDepartment(group, dept); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupInterface getGroup(String id) { + return groupDao.getGroup(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupDetail getGroupDetail(String id) { + return groupDao.getGroupDetail(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupDetail getRootGroupDetail(ShowInterface s) { + return groupDao.getRootGroupDetail(s); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public GroupDetail getGroupDetail(JobInterface j) { + return groupDao.getGroupDetail(j); + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } + + public DepartmentDao getDepartmentDao() { + return departmentDao; + } + + public void setDepartmentDao(DepartmentDao departmentDao) { + this.departmentDao = departmentDao; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java index 9aca062ac..fca7ea6b1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManager.java @@ -21,18 +21,18 @@ public interface HistoricalManager { - /** - * Returns a list of jobs ready to be archived. - * - * @return List - */ - List getFinishedJobs(); - - /** - * Transfers data from the live to the historical tables. - * - * @param job - */ - void transferJob(JobInterface job); + /** + * Returns a list of jobs ready to be archived. + * + * @return List + */ + List getFinishedJobs(); + + /** + * Transfers data from the live to the historical tables. 
+ * + * @param job + */ + void transferJob(JobInterface job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java index 8e60eebe3..85e004218 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalManagerService.java @@ -29,33 +29,33 @@ @Transactional public class HistoricalManagerService implements HistoricalManager { - private HistoricalDao historicalDao; - - @Autowired - private Environment env; - - @Transactional(readOnly = true, isolation = Isolation.SERIALIZABLE) - public List getFinishedJobs() { - return historicalDao.getFinishedJobs( - env.getRequiredProperty("history.archive_jobs_cutoff_hours", Integer.class)); - } - - @Transactional - public void transferJob(JobInterface job) { - try { - historicalDao.transferJob(job); - } catch (Exception e) { - throw new HistoricalJobTransferException( - "failed to transfer job " + job.getName() + " to historical table"); + private HistoricalDao historicalDao; + + @Autowired + private Environment env; + + @Transactional(readOnly = true, isolation = Isolation.SERIALIZABLE) + public List getFinishedJobs() { + return historicalDao.getFinishedJobs( + env.getRequiredProperty("history.archive_jobs_cutoff_hours", Integer.class)); + } + + @Transactional + public void transferJob(JobInterface job) { + try { + historicalDao.transferJob(job); + } catch (Exception e) { + throw new HistoricalJobTransferException( + "failed to transfer job " + job.getName() + " to historical table"); + } } - } - public HistoricalDao getHistoricalDao() { - return historicalDao; - } + public HistoricalDao getHistoricalDao() { + return historicalDao; + } - public void setHistoricalDao(HistoricalDao historicalDao) { - this.historicalDao = historicalDao; - } + public void setHistoricalDao(HistoricalDao historicalDao) { + this.historicalDao = historicalDao; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java index c266ba1f8..0ecef835a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java @@ -23,28 +23,28 @@ import com.imageworks.spcue.JobInterface; public class HistoricalSupport { - private static final Logger logger = LogManager.getLogger(HistoricalSupport.class); - - private HistoricalManager historicalManager; - - public void archiveHistoricalJobData() { - logger.info("running historical job data transfer"); - List jobs = historicalManager.getFinishedJobs(); - for (JobInterface j : jobs) { - logger.info("transfering job " + j.getId() + "/" + j.getName()); - try { - historicalManager.transferJob(j); - } catch (Exception e) { - logger.warn("failed to transfer job, " + e); - } + private static final Logger logger = LogManager.getLogger(HistoricalSupport.class); + + private HistoricalManager historicalManager; + + public void archiveHistoricalJobData() { + logger.info("running historical job data transfer"); + List jobs = historicalManager.getFinishedJobs(); + for (JobInterface j : jobs) { + logger.info("transfering job " + j.getId() + "/" + j.getName()); + try { + historicalManager.transferJob(j); + } catch (Exception e) { + logger.warn("failed to transfer job, " + e); + } + } } - } - public HistoricalManager getHistoricalManager() { - 
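The archive pass is a two-step sweep: getFinishedJobs() selects jobs older than the history.archive_jobs_cutoff_hours property and transferJob() moves each one, with per-job failures logged and skipped. A sketch of driving it (the wiring and the trigger are assumptions; only the calls themselves come from this code):

    // Sketch only: run one archive sweep. Jobs finished more than the configured
    // number of hours ago are copied from the live tables to the historical
    // tables; a failure on one job is logged and does not stop the sweep.
    HistoricalSupport historicalSupport = new HistoricalSupport();
    historicalSupport.setHistoricalManager(historicalManager); // wired elsewhere

    historicalSupport.archiveHistoricalJobData();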
return historicalManager; - } + public HistoricalManager getHistoricalManager() { + return historicalManager; + } - public void setHistoricalManager(HistoricalManager historicalManager) { - this.historicalManager = historicalManager; - } + public void setHistoricalManager(HistoricalManager historicalManager) { + this.historicalManager = historicalManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java index e074169b3..76d5282b2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java @@ -38,211 +38,211 @@ public interface HostManager { - void rebootWhenIdle(HostInterface host); - - void rebootNow(HostInterface host); - - /** - * Lock/unlock the specified host. - * - * @param host - * @param state - * @param source - */ - void setHostLock(HostInterface host, LockState state, Source source); - - /** - * Updates the state of a host. - * - * @param host HostInterface - * @param state HardwareState - */ - void setHostState(HostInterface host, HardwareState state); - - /** - * Updates the free temporary directory (mcp) of a host. - * - * @param host HostInterface - * @param freeTempDir Long - */ - void setHostFreeTempDir(HostInterface host, Long freeTempDir); - - DispatchHost createHost(HostReport report); - - DispatchHost createHost(RenderHost host); - - /** - * Create a host and move it into the specified allocation. - * - * @param rhost - * @param alloc - * @return - */ - DispatchHost createHost(RenderHost rhost, AllocationEntity alloc); - - HostInterface getHost(String id); - - HostInterface findHost(String name); - - DispatchHost getDispatchHost(String id); - - DispatchHost findDispatchHost(String name); - - HostEntity getHostDetail(HostInterface host); - - HostEntity getHostDetail(String id); - - HostEntity findHostDetail(String name); - - /** - * Returns true of the LockState is not Open. - * - * @param host - * @return - */ - boolean isLocked(HostInterface host); - - /** - * Set all host statistics. - * - * @param host - * @param totalMemory - * @param freeMemory - * @param totalSwap - * @param freeSwap - * @param totalMcp - * @param freeMcp - * @param totalGpuMemory - * @param freeGpuMemory - * @param load - * @param bootTime - * @param os - */ - void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, long totalSwap, - long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, int load, - Timestamp bootTime, String os); - - void deleteHost(HostInterface host); - - AllocationInterface getDefaultAllocationDetail(); - - void setAllocation(HostInterface host, AllocationInterface alloc); - - void addTags(HostInterface host, String[] tags); - - void removeTags(HostInterface host, String[] tags); - - void renameTag(HostInterface host, String oldTag, String newTag); - - /** - * Verify that the given proc and frame IDs are assigned to each other in the database. - * - * @param procId - * @param frameId - * @return - */ - boolean verifyRunningProc(String procId, String frameId); - - /** - * Returns a list of VirtualProcs that match the specified criteria. 
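A usage sketch of the host calls declared in this interface (the host name is hypothetical; hostManager is assumed to be in scope):

    // Sketch only: schedule a reboot for when the host goes idle, then check
    // whether it is still a dispatch candidate per the contracts documented here.
    HostInterface host = hostManager.findHost("render-node-001");
    hostManager.rebootWhenIdle(host);   // marks REBOOT_WHEN_IDLE and notifies RQD

    boolean dispatchable = hostManager.isHostUp(host) && !hostManager.isLocked(host);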
- * - * @param r - * @return a list of VirtualProcs that match the criteria - */ - List findVirtualProcs(ProcSearchInterface r); - - List findVirtualProcs(FrameSearchInterface r); - - VirtualProc findVirtualProc(FrameInterface frame); - - List findVirtualProcs(HardwareState state); - - /** - * Returns a list of booked procs. When a proc is "booked", that means it plans on staying on the - * same job after it completes the current frame. If a proc is unbooked, it aways tries to find - * work to do on another job. - * - * @param r - * @return - */ - List findBookedVirtualProcs(ProcSearchInterface r); - - void unbookVirtualProcs(List procs); - - void unbookProc(ProcInterface proc); - - /** - * Return the Virtual proc with the specified unique ID. - * - * @param id - * @return - */ - VirtualProc getVirtualProc(String id); - - /** - * Return true if the given host is in the Up state. Other states are Down, Rebooting, - * RebootWhenIdle, etc. Only hosts in the Up state should be booked or dispatched. - * - * @param host - * @return - */ - boolean isHostUp(HostInterface host); - - /** - * Return true if the proc is an orphan. An orphan has not had a ping in 5 minutes. - * - * @param proc - * @return - */ - boolean isOprhan(ProcInterface proc); - - /** - * Return the number of stranded cores on the host. - */ - int getStrandedCoreUnits(HostInterface h); - - /** - * Return the number of stranded cores on the host. - */ - int getStrandedGpuUnits(HostInterface h); - - /** - * Return true of the host prefers a particular show. - * - * @param host - * @return - */ - boolean isPreferShow(HostInterface host); - - /** - * Return a host's preferred show. - * - * @param host - * @return - */ - ShowInterface getPreferredShow(HostInterface host); - - /** - * Return all running procs for the given host. - * - * @param host - * @return - */ - List findVirtualProcs(HostInterface host); - - /** - * Return all running procs for the given LocalHostAssignment. - * - * @param l - * @return - */ - List findVirtualProcs(LocalHostAssignment l); - - /** - * Set the hosts available idle cores and memory. - * - * @param host - * @param report - */ - void setHostResources(DispatchHost host, HostReport report); + void rebootWhenIdle(HostInterface host); + + void rebootNow(HostInterface host); + + /** + * Lock/unlock the specified host. + * + * @param host + * @param state + * @param source + */ + void setHostLock(HostInterface host, LockState state, Source source); + + /** + * Updates the state of a host. + * + * @param host HostInterface + * @param state HardwareState + */ + void setHostState(HostInterface host, HardwareState state); + + /** + * Updates the free temporary directory (mcp) of a host. + * + * @param host HostInterface + * @param freeTempDir Long + */ + void setHostFreeTempDir(HostInterface host, Long freeTempDir); + + DispatchHost createHost(HostReport report); + + DispatchHost createHost(RenderHost host); + + /** + * Create a host and move it into the specified allocation. + * + * @param rhost + * @param alloc + * @return + */ + DispatchHost createHost(RenderHost rhost, AllocationEntity alloc); + + HostInterface getHost(String id); + + HostInterface findHost(String name); + + DispatchHost getDispatchHost(String id); + + DispatchHost findDispatchHost(String name); + + HostEntity getHostDetail(HostInterface host); + + HostEntity getHostDetail(String id); + + HostEntity findHostDetail(String name); + + /** + * Returns true of the LockState is not Open. 
+ * + * @param host + * @return + */ + boolean isLocked(HostInterface host); + + /** + * Set all host statistics. + * + * @param host + * @param totalMemory + * @param freeMemory + * @param totalSwap + * @param freeSwap + * @param totalMcp + * @param freeMcp + * @param totalGpuMemory + * @param freeGpuMemory + * @param load + * @param bootTime + * @param os + */ + void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, long totalSwap, + long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, long freeGpuMemory, + int load, Timestamp bootTime, String os); + + void deleteHost(HostInterface host); + + AllocationInterface getDefaultAllocationDetail(); + + void setAllocation(HostInterface host, AllocationInterface alloc); + + void addTags(HostInterface host, String[] tags); + + void removeTags(HostInterface host, String[] tags); + + void renameTag(HostInterface host, String oldTag, String newTag); + + /** + * Verify that the given proc and frame IDs are assigned to each other in the database. + * + * @param procId + * @param frameId + * @return + */ + boolean verifyRunningProc(String procId, String frameId); + + /** + * Returns a list of VirtualProcs that match the specified criteria. + * + * @param r + * @return a list of VirtualProcs that match the criteria + */ + List findVirtualProcs(ProcSearchInterface r); + + List findVirtualProcs(FrameSearchInterface r); + + VirtualProc findVirtualProc(FrameInterface frame); + + List findVirtualProcs(HardwareState state); + + /** + * Returns a list of booked procs. When a proc is "booked", that means it plans on staying on + * the same job after it completes the current frame. If a proc is unbooked, it aways tries to + * find work to do on another job. + * + * @param r + * @return + */ + List findBookedVirtualProcs(ProcSearchInterface r); + + void unbookVirtualProcs(List procs); + + void unbookProc(ProcInterface proc); + + /** + * Return the Virtual proc with the specified unique ID. + * + * @param id + * @return + */ + VirtualProc getVirtualProc(String id); + + /** + * Return true if the given host is in the Up state. Other states are Down, Rebooting, + * RebootWhenIdle, etc. Only hosts in the Up state should be booked or dispatched. + * + * @param host + * @return + */ + boolean isHostUp(HostInterface host); + + /** + * Return true if the proc is an orphan. An orphan has not had a ping in 5 minutes. + * + * @param proc + * @return + */ + boolean isOprhan(ProcInterface proc); + + /** + * Return the number of stranded cores on the host. + */ + int getStrandedCoreUnits(HostInterface h); + + /** + * Return the number of stranded cores on the host. + */ + int getStrandedGpuUnits(HostInterface h); + + /** + * Return true of the host prefers a particular show. + * + * @param host + * @return + */ + boolean isPreferShow(HostInterface host); + + /** + * Return a host's preferred show. + * + * @param host + * @return + */ + ShowInterface getPreferredShow(HostInterface host); + + /** + * Return all running procs for the given host. + * + * @param host + * @return + */ + List findVirtualProcs(HostInterface host); + + /** + * Return all running procs for the given LocalHostAssignment. + * + * @param l + * @return + */ + List findVirtualProcs(LocalHostAssignment l); + + /** + * Set the hosts available idle cores and memory. 
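Combining the proc queries above, a sketch of an orphan sweep for a single host (the policy shown is illustrative, not cuebot's actual cleanup path):

    // Sketch only: unbook any proc on this host that has not pinged in five
    // minutes, which is isOprhan's documented meaning.
    List<VirtualProc> procs = hostManager.findVirtualProcs(host);
    for (VirtualProc proc : procs) {
        if (hostManager.isOprhan(proc)) {
            hostManager.unbookProc(proc);
        }
    }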
+ * + * @param host + * @param report + */ + void setHostResources(DispatchHost host, HostReport report); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java index 8f5ce90d5..1432f7169 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java @@ -54,357 +54,358 @@ @Transactional public class HostManagerService implements HostManager { - private static final Logger logger = LogManager.getLogger(HostManagerService.class); - - private HostDao hostDao; - private RqdClient rqdClient; - private ProcDao procDao; - private ShowDao showDao; - private FacilityDao facilityDao; - private SubscriptionDao subscriptionDao; - private AllocationDao allocationDao; - - public HostManagerService() {} - - @Override - public void setHostLock(HostInterface host, LockState lock, Source source) { - hostDao.updateHostLock(host, lock, source); - rqdClient.setHostLock(host, lock); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isLocked(HostInterface host) { - return hostDao.isHostLocked(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isHostUp(HostInterface host) { - return hostDao.isHostUp(host); - } - - @Override - public void setHostState(HostInterface host, HardwareState state) { - hostDao.updateHostState(host, state); - } - - @Override - public void setHostFreeTempDir(HostInterface host, Long freeTempDir) { - hostDao.updateHostFreeTempDir(host, freeTempDir); - } - - public void rebootWhenIdle(HostInterface host) { - try { - hostDao.updateHostState(host, HardwareState.REBOOT_WHEN_IDLE); - rqdClient.rebootWhenIdle(host); - } catch (RqdClientException e) { - logger.info("failed to contact host: " + host.getName() + " for reboot"); - } - } - - public void rebootNow(HostInterface host) { - try { - hostDao.updateHostState(host, HardwareState.REBOOTING); - rqdClient.rebootNow(host); - } catch (RqdClientException e) { - logger.info("failed to contact host: " + host.getName() + " for reboot"); - hostDao.updateHostState(host, HardwareState.DOWN); - } - } - - @Override - public void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, - long totalSwap, long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, - long freeGpuMemory, int load, Timestamp bootTime, String os) { - - hostDao.updateHostStats(host, totalMemory, freeMemory, totalSwap, freeSwap, totalMcp, freeMcp, - totalGpuMemory, freeGpuMemory, load, bootTime, os); - } - - @Transactional(propagation = Propagation.SUPPORTS, readOnly = true) - public HostInterface findHost(String name) { - return hostDao.findHost(name); - } - - @Transactional(propagation = Propagation.SUPPORTS, readOnly = true) - public HostInterface getHost(String id) { - return hostDao.getHost(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public DispatchHost createHost(HostReport report) { - return createHost(report.getHost()); - } - - @Transactional(propagation = Propagation.REQUIRED) - public DispatchHost createHost(RenderHost rhost) { - // Find suitable allocation with facility and tags. 
- AllocationEntity alloc = null; - if (rhost.getTagsCount() > 0) { - String facility = rhost.getFacility(); - for (String tag : rhost.getTagsList()) { + private static final Logger logger = LogManager.getLogger(HostManagerService.class); + + private HostDao hostDao; + private RqdClient rqdClient; + private ProcDao procDao; + private ShowDao showDao; + private FacilityDao facilityDao; + private SubscriptionDao subscriptionDao; + private AllocationDao allocationDao; + + public HostManagerService() {} + + @Override + public void setHostLock(HostInterface host, LockState lock, Source source) { + hostDao.updateHostLock(host, lock, source); + rqdClient.setHostLock(host, lock); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isLocked(HostInterface host) { + return hostDao.isHostLocked(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isHostUp(HostInterface host) { + return hostDao.isHostUp(host); + } + + @Override + public void setHostState(HostInterface host, HardwareState state) { + hostDao.updateHostState(host, state); + } + + @Override + public void setHostFreeTempDir(HostInterface host, Long freeTempDir) { + hostDao.updateHostFreeTempDir(host, freeTempDir); + } + + public void rebootWhenIdle(HostInterface host) { + try { + hostDao.updateHostState(host, HardwareState.REBOOT_WHEN_IDLE); + rqdClient.rebootWhenIdle(host); + } catch (RqdClientException e) { + logger.info("failed to contact host: " + host.getName() + " for reboot"); + } + } + + public void rebootNow(HostInterface host) { try { - alloc = allocationDao.findAllocationEntity(facility, tag); - logger.info("set " + rhost.getName() + " to the given allocation " + alloc.getName()); - break; - } catch (EmptyResultDataAccessException e) { - // Allocation doesn't exist. ignore. + hostDao.updateHostState(host, HardwareState.REBOOTING); + rqdClient.rebootNow(host); + } catch (RqdClientException e) { + logger.info("failed to contact host: " + host.getName() + " for reboot"); + hostDao.updateHostState(host, HardwareState.DOWN); + } + } + + @Override + public void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, + long totalSwap, long freeSwap, long totalMcp, long freeMcp, long totalGpuMemory, + long freeGpuMemory, int load, Timestamp bootTime, String os) { + + hostDao.updateHostStats(host, totalMemory, freeMemory, totalSwap, freeSwap, totalMcp, + freeMcp, totalGpuMemory, freeGpuMemory, load, bootTime, os); + } + + @Transactional(propagation = Propagation.SUPPORTS, readOnly = true) + public HostInterface findHost(String name) { + return hostDao.findHost(name); + } + + @Transactional(propagation = Propagation.SUPPORTS, readOnly = true) + public HostInterface getHost(String id) { + return hostDao.getHost(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public DispatchHost createHost(HostReport report) { + return createHost(report.getHost()); + } + + @Transactional(propagation = Propagation.REQUIRED) + public DispatchHost createHost(RenderHost rhost) { + // Find suitable allocation with facility and tags. 
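    // Illustrative walk-through of the allocation lookup in createHost (facility
    // and tag values are hypothetical): for a host reporting facility "cloud" and
    // tags ["gpu", "general"], findAllocationEntity("cloud", "gpu") is tried first.
    // EmptyResultDataAccessException only means no such allocation exists, so it
    // is swallowed and "general" is tried next; only when no tag resolves does the
    // host fall back to getDefaultAllocationDetail().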
+ AllocationEntity alloc = null; + if (rhost.getTagsCount() > 0) { + String facility = rhost.getFacility(); + for (String tag : rhost.getTagsList()) { + try { + alloc = allocationDao.findAllocationEntity(facility, tag); + logger.info("set " + rhost.getName() + " to the given allocation " + + alloc.getName()); + break; + } catch (EmptyResultDataAccessException e) { + // Allocation doesn't exist. ignore. + } + } + } + if (alloc == null) { + alloc = getDefaultAllocationDetail(); + logger.info("set " + rhost.getName() + " to the default allocation " + alloc.getName()); + } + return createHost(rhost, alloc); + } + + @Transactional(propagation = Propagation.REQUIRED) + public DispatchHost createHost(RenderHost rhost, AllocationEntity alloc) { + + hostDao.insertRenderHost(rhost, alloc, false); + DispatchHost host = hostDao.findDispatchHost(rhost.getName()); + + hostDao.tagHost(host, alloc.tag, HostTagType.ALLOC); + hostDao.tagHost(host, host.name, HostTagType.HOSTNAME); + + if (rhost.getTagsCount() > 0) { + for (String tag : rhost.getTagsList()) { + hostDao.tagHost(host, tag, HostTagType.MANUAL); + } + } + + // Don't tag anything with hardware yet, we don't watch new procs + // that report in to automatically start running frames. + + hostDao.recalcuateTags(host.id); + return host; + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchHost findDispatchHost(String name) { + return hostDao.findDispatchHost(name); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public HostEntity findHostDetail(String name) { + return hostDao.findHostDetail(name); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchHost getDispatchHost(String id) { + return hostDao.getDispatchHost(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public HostEntity getHostDetail(HostInterface host) { + return hostDao.getHostDetail(host); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public HostEntity getHostDetail(String id) { + return hostDao.getHostDetail(id); + } + + @Transactional(propagation = Propagation.SUPPORTS) + public AllocationEntity getDefaultAllocationDetail() { + return allocationDao.getDefaultAllocationEntity(); + } + + public void addTags(HostInterface host, String[] tags) { + for (String tag : tags) { + if (tag == null) { + continue; + } + if (tag.length() == 0) { + continue; + } + hostDao.tagHost(host, tag, HostTagType.MANUAL); + } + hostDao.recalcuateTags(host.getHostId()); + } + + public void removeTags(HostInterface host, String[] tags) { + for (String tag : tags) { + hostDao.removeTag(host, tag); + } + hostDao.recalcuateTags(host.getHostId()); + } + + public void renameTag(HostInterface host, String oldTag, String newTag) { + hostDao.renameTag(host, oldTag, newTag); + hostDao.recalcuateTags(host.getHostId()); + } + + public void setAllocation(HostInterface host, AllocationInterface alloc) { + + if (procDao.findVirtualProcs(host).size() > 0) { + throw new EntityModificationError( + "You cannot move hosts with " + "running procs between allocations."); + } + + hostDao.lockForUpdate(host); + hostDao.updateHostSetAllocation(host, alloc); + hostDao.recalcuateTags(host.getHostId()); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public int getStrandedCoreUnits(HostInterface h) { + return hostDao.getStrandedCoreUnits(h); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = 
true) + public int getStrandedGpuUnits(HostInterface h) { + return hostDao.getStrandedGpus(h); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean verifyRunningProc(String procId, String frameId) { + return procDao.verifyRunningProc(procId, frameId); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(FrameSearchInterface request) { + return procDao.findVirtualProcs(request); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public VirtualProc findVirtualProc(FrameInterface frame) { + return procDao.findVirtualProc(frame); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(HardwareState state) { + return procDao.findVirtualProcs(state); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(LocalHostAssignment l) { + return procDao.findVirtualProcs(l); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(ProcSearchInterface r) { + return procDao.findVirtualProcs(r); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findVirtualProcs(HostInterface host) { + return procDao.findVirtualProcs(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findBookedVirtualProcs(ProcSearchInterface r) { + return procDao.findBookedVirtualProcs(r); + } + + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void unbookVirtualProcs(List procs) { + for (VirtualProc proc : procs) { + unbookProc(proc); } - } } - if (alloc == null) { - alloc = getDefaultAllocationDetail(); - logger.info("set " + rhost.getName() + " to the default allocation " + alloc.getName()); + + @Transactional(propagation = Propagation.REQUIRED) + public void unbookProc(ProcInterface proc) { + procDao.unbookProc(proc); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void setHostResources(DispatchHost host, HostReport report) { + hostDao.updateHostResources(host, report); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public VirtualProc getVirtualProc(String id) { + return procDao.getVirtualProc(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isOprhan(ProcInterface proc) { + return procDao.isOrphan(proc); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isPreferShow(HostInterface host) { + return hostDao.isPreferShow(host); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ShowInterface getPreferredShow(HostInterface host) { + return showDao.getShowDetail(host); + } + + public void deleteHost(HostInterface host) { + hostDao.deleteHost(host); + } + + public AllocationDao getAllocationDao() { + return allocationDao; + } + + public void setAllocationDao(AllocationDao allocationDao) { + this.allocationDao = allocationDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public ProcDao getProcDao() { + return procDao; + } + + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void 
setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public FacilityDao getFacilityDao() { + return facilityDao; + } + + public void setFacilityDao(FacilityDao facilityDao) { + this.facilityDao = facilityDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public SubscriptionDao getSubscriptionDao() { + return subscriptionDao; + } + + public void setSubscriptionDao(SubscriptionDao subscriptionDao) { + this.subscriptionDao = subscriptionDao; } - return createHost(rhost, alloc); - } - - @Transactional(propagation = Propagation.REQUIRED) - public DispatchHost createHost(RenderHost rhost, AllocationEntity alloc) { - - hostDao.insertRenderHost(rhost, alloc, false); - DispatchHost host = hostDao.findDispatchHost(rhost.getName()); - - hostDao.tagHost(host, alloc.tag, HostTagType.ALLOC); - hostDao.tagHost(host, host.name, HostTagType.HOSTNAME); - - if (rhost.getTagsCount() > 0) { - for (String tag : rhost.getTagsList()) { - hostDao.tagHost(host, tag, HostTagType.MANUAL); - } - } - - // Don't tag anything with hardware yet, we don't watch new procs - // that report in to automatically start running frames. - - hostDao.recalcuateTags(host.id); - return host; - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DispatchHost findDispatchHost(String name) { - return hostDao.findDispatchHost(name); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public HostEntity findHostDetail(String name) { - return hostDao.findHostDetail(name); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DispatchHost getDispatchHost(String id) { - return hostDao.getDispatchHost(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public HostEntity getHostDetail(HostInterface host) { - return hostDao.getHostDetail(host); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public HostEntity getHostDetail(String id) { - return hostDao.getHostDetail(id); - } - - @Transactional(propagation = Propagation.SUPPORTS) - public AllocationEntity getDefaultAllocationDetail() { - return allocationDao.getDefaultAllocationEntity(); - } - - public void addTags(HostInterface host, String[] tags) { - for (String tag : tags) { - if (tag == null) { - continue; - } - if (tag.length() == 0) { - continue; - } - hostDao.tagHost(host, tag, HostTagType.MANUAL); - } - hostDao.recalcuateTags(host.getHostId()); - } - - public void removeTags(HostInterface host, String[] tags) { - for (String tag : tags) { - hostDao.removeTag(host, tag); - } - hostDao.recalcuateTags(host.getHostId()); - } - - public void renameTag(HostInterface host, String oldTag, String newTag) { - hostDao.renameTag(host, oldTag, newTag); - hostDao.recalcuateTags(host.getHostId()); - } - - public void setAllocation(HostInterface host, AllocationInterface alloc) { - - if (procDao.findVirtualProcs(host).size() > 0) { - throw new EntityModificationError( - "You cannot move hosts with " + "running procs between allocations."); - } - - hostDao.lockForUpdate(host); - hostDao.updateHostSetAllocation(host, alloc); - hostDao.recalcuateTags(host.getHostId()); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public int getStrandedCoreUnits(HostInterface h) { - return hostDao.getStrandedCoreUnits(h); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public int 
getStrandedGpuUnits(HostInterface h) { - return hostDao.getStrandedGpus(h); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean verifyRunningProc(String procId, String frameId) { - return procDao.verifyRunningProc(procId, frameId); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findVirtualProcs(FrameSearchInterface request) { - return procDao.findVirtualProcs(request); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public VirtualProc findVirtualProc(FrameInterface frame) { - return procDao.findVirtualProc(frame); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findVirtualProcs(HardwareState state) { - return procDao.findVirtualProcs(state); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findVirtualProcs(LocalHostAssignment l) { - return procDao.findVirtualProcs(l); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findVirtualProcs(ProcSearchInterface r) { - return procDao.findVirtualProcs(r); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findVirtualProcs(HostInterface host) { - return procDao.findVirtualProcs(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findBookedVirtualProcs(ProcSearchInterface r) { - return procDao.findBookedVirtualProcs(r); - } - - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void unbookVirtualProcs(List procs) { - for (VirtualProc proc : procs) { - unbookProc(proc); - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public void unbookProc(ProcInterface proc) { - procDao.unbookProc(proc); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void setHostResources(DispatchHost host, HostReport report) { - hostDao.updateHostResources(host, report); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public VirtualProc getVirtualProc(String id) { - return procDao.getVirtualProc(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isOprhan(ProcInterface proc) { - return procDao.isOrphan(proc); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isPreferShow(HostInterface host) { - return hostDao.isPreferShow(host); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ShowInterface getPreferredShow(HostInterface host) { - return showDao.getShowDetail(host); - } - - public void deleteHost(HostInterface host) { - hostDao.deleteHost(host); - } - - public AllocationDao getAllocationDao() { - return allocationDao; - } - - public void setAllocationDao(AllocationDao allocationDao) { - this.allocationDao = allocationDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public ProcDao getProcDao() { - return procDao; - } - - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } - - public FacilityDao getFacilityDao() { - return facilityDao; - } - - public void setFacilityDao(FacilityDao 
facilityDao) { - this.facilityDao = facilityDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public SubscriptionDao getSubscriptionDao() { - return subscriptionDao; - } - - public void setSubscriptionDao(SubscriptionDao subscriptionDao) { - this.subscriptionDao = subscriptionDao; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java b/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java index 1befa2aed..4bbe33edf 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java @@ -36,64 +36,66 @@ import com.imageworks.spcue.util.CueExceptionUtil; public class JmsMover extends ThreadPoolExecutor { - private static final Logger logger = LogManager.getLogger(JmsMover.class); - private final Gson gson = new GsonBuilder().serializeNulls().create(); + private static final Logger logger = LogManager.getLogger(JmsMover.class); + private final Gson gson = new GsonBuilder().serializeNulls().create(); - @Autowired - private Environment env; - private JmsTemplate template; - private Topic topic; + @Autowired + private Environment env; + private JmsTemplate template; + private Topic topic; - private static final int THREAD_POOL_SIZE_INITIAL = 1; - private static final int THREAD_POOL_SIZE_MAX = 1; - private static final int QUEUE_SIZE_INITIAL = 1000; + private static final int THREAD_POOL_SIZE_INITIAL = 1; + private static final int THREAD_POOL_SIZE_MAX = 1; + private static final int QUEUE_SIZE_INITIAL = 1000; - public JmsMover() { - super(THREAD_POOL_SIZE_INITIAL, THREAD_POOL_SIZE_MAX, 10, TimeUnit.SECONDS, - new LinkedBlockingQueue(QUEUE_SIZE_INITIAL)); - } + public JmsMover() { + super(THREAD_POOL_SIZE_INITIAL, THREAD_POOL_SIZE_MAX, 10, TimeUnit.SECONDS, + new LinkedBlockingQueue(QUEUE_SIZE_INITIAL)); + } - public void send(Object m) { - if (env.getRequiredProperty("messaging.enabled", Boolean.class)) { - try { - execute(new Runnable() { - @Override - public void run() { + public void send(Object m) { + if (env.getRequiredProperty("messaging.enabled", Boolean.class)) { try { - template.send(topic, new MessageCreator() { - @Override - public Message createMessage(Session session) throws javax.jms.JMSException { - return session.createTextMessage(gson.toJson(m)); - } - }); - } catch (JmsException e) { - logger.warn("Failed to send JMS message"); - CueExceptionUtil - .logStackTrace("JmsProducer " + this.getClass().toString() + " caught error ", e); + execute(new Runnable() { + @Override + public void run() { + try { + template.send(topic, new MessageCreator() { + @Override + public Message createMessage(Session session) + throws javax.jms.JMSException { + return session.createTextMessage(gson.toJson(m)); + } + }); + } catch (JmsException e) { + logger.warn("Failed to send JMS message"); + CueExceptionUtil.logStackTrace( + "JmsProducer " + this.getClass().toString() + " caught error ", + e); + } + } + }); + } catch (RejectedExecutionException e) { + logger.warn("Outgoing JMS message queue is full!"); + CueExceptionUtil.logStackTrace( + "JmsProducer " + this.getClass().toString() + " caught error ", e); } - } - }); - } catch (RejectedExecutionException e) { - logger.warn("Outgoing JMS message queue is full!"); - CueExceptionUtil - .logStackTrace("JmsProducer " + this.getClass().toString() + " caught error ", e); - } + } } - } - public JmsTemplate getTemplate() { - return template; - } + 
public JmsTemplate getTemplate() { + return template; + } - public void setTemplate(JmsTemplate template) { - this.template = template; - } + public void setTemplate(JmsTemplate template) { + this.template = template; + } - public Topic getTopic() { - return topic; - } + public Topic getTopic() { + return topic; + } - public void setTopic(Topic topic) { - this.topic = topic; - } + public void setTopic(Topic topic) { + this.topic = topic; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java index 7b2f09016..ee017b167 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java @@ -40,188 +40,189 @@ * Job launching functions. */ public class JobLauncher implements ApplicationContextAware { - private static final Logger logger = LogManager.getLogger(JobLauncher.class); - private ApplicationContext context; - - private JobManager jobManager; - private DepartmentManager departmentManager; - private AdminManager adminManager; - private ThreadPoolTaskExecutor launchQueue; - private EmailSupport emailSupport; - private JmsMover jmsMover; - private LocalBookingSupport localBookingSupport; - - /** - * When true, disables log path creation and proc points sync. - */ - public volatile boolean testMode = false; - - @Override - public void setApplicationContext(ApplicationContext context) throws BeansException { - this.context = context; - } - - public JobSpec parse(String xml) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(xml); - return spec; - } - - public JobSpec parse(File file) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(file); - return spec; - } - - public void launch(String xml) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(xml); - launch(spec); - } - - public void launch(File file) { - JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); - spec.parse(file); - launch(spec); - } - - public void launch(final JobSpec spec) { - - verifyJobSpec(spec); - - try { - jobManager.launchJobSpec(spec); - - for (BuildableJob job : spec.getJobs()) { - /* - * If isLocal is set, need to create local host assignment. - */ - JobDetail d = job.detail; - if (d.isLocal) { - logger.info( - d.localHostName + " will do local dispatch. " + d.getJobId() + " " + d.localHostName); - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setJobId(d.getJobId()); - lha.setThreads(d.localThreadNumber); - lha.setMaxCoreUnits(d.localMaxCores * 100); - lha.setMaxMemory(d.localMaxMemory); - lha.setMaxGpuUnits(d.localMaxGpus); - lha.setMaxGpuMemory(d.localMaxGpuMemory); - lha.setType(RenderPartitionType.JOB_PARTITION); - - try { - localBookingSupport.bookLocal(d, d.localHostName, d.user, lha); - } catch (DataIntegrityViolationException e) { - logger.info(d.name + " failed to create host local assignment."); - } + private static final Logger logger = LogManager.getLogger(JobLauncher.class); + private ApplicationContext context; + + private JobManager jobManager; + private DepartmentManager departmentManager; + private AdminManager adminManager; + private ThreadPoolTaskExecutor launchQueue; + private EmailSupport emailSupport; + private JmsMover jmsMover; + private LocalBookingSupport localBookingSupport; + + /** + * When true, disables log path creation and proc points sync. 
+ */ + public volatile boolean testMode = false; + + @Override + public void setApplicationContext(ApplicationContext context) throws BeansException { + this.context = context; + } + + public JobSpec parse(String xml) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(xml); + return spec; + } + + public JobSpec parse(File file) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(file); + return spec; + } + + public void launch(String xml) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(xml); + launch(spec); + } + + public void launch(File file) { + JobSpec spec = (JobSpec) this.context.getBean("jobSpec"); + spec.parse(file); + launch(spec); + } + + public void launch(final JobSpec spec) { + + verifyJobSpec(spec); + + try { + jobManager.launchJobSpec(spec); + + for (BuildableJob job : spec.getJobs()) { + /* + * If isLocal is set, need to create local host assignment. + */ + JobDetail d = job.detail; + if (d.isLocal) { + logger.info(d.localHostName + " will do local dispatch. " + d.getJobId() + " " + + d.localHostName); + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setJobId(d.getJobId()); + lha.setThreads(d.localThreadNumber); + lha.setMaxCoreUnits(d.localMaxCores * 100); + lha.setMaxMemory(d.localMaxMemory); + lha.setMaxGpuUnits(d.localMaxGpus); + lha.setMaxGpuMemory(d.localMaxGpuMemory); + lha.setType(RenderPartitionType.JOB_PARTITION); + + try { + localBookingSupport.bookLocal(d, d.localHostName, d.user, lha); + } catch (DataIntegrityViolationException e) { + logger.info(d.name + " failed to create host local assignment."); + } + } + } + + /* + * This has to happen outside of the job launching transaction or else it can lock up + * booking because it updates the job_resource table. It can take quite some time to + * launch a job with dependencies, so the transaction should not touch any rows that are + * currently in the "live" data set. + */ + if (!testMode) { + Set depts = new HashSet(); + for (BuildableJob job : spec.getJobs()) { + JobDetail d = jobManager.getJobDetail(job.detail.id); + jmsMover.send(d); + if (departmentManager.isManaged(d)) { + if (!depts.contains(d.deptId)) { + departmentManager.syncJobsWithTask(d); + depts.add(d.deptId); + } + } + } + } + } catch (Exception e) { + // Catch anything and email the user a report as to + // why the job launch failed. + emailSupport.reportLaunchError(spec, e); } - } - - /* - * This has to happen outside of the job launching transaction or else it can lock up booking - * because it updates the job_resource table. It can take quite some time to launch a job with - * dependencies, so the transaction should not touch any rows that are currently in the "live" - * data set. - */ - if (!testMode) { - Set depts = new HashSet(); + } + + public void verifyJobSpec(JobSpec spec) { + for (BuildableJob job : spec.getJobs()) { - JobDetail d = jobManager.getJobDetail(job.detail.id); - jmsMover.send(d); - if (departmentManager.isManaged(d)) { - if (!depts.contains(d.deptId)) { - departmentManager.syncJobsWithTask(d); - depts.add(d.deptId); + if (jobManager.isJobPending(job.detail.name)) { + throw new EntityCreationError("The job " + job.detail.name + " is already pending"); + } + } + + try { + ShowEntity s = adminManager.findShowEntity(spec.getShow()); + if (!s.active) { + throw new EntityCreationError("The " + spec.getShow() + + " show has been deactivated. 
Please contact " + + "administrator of your OpenCue deployment to reactivate " + "this show."); } - } + } catch (EmptyResultDataAccessException e) { + throw new EntityCreationError("The " + spec.getShow() + + " does not exist. Please contact " + + "administrator of your OpenCue deployment to have this show " + "created."); } - } - } catch (Exception e) { - // Catch anything and email the user a report as to - // why the job launch failed. - emailSupport.reportLaunchError(spec, e); - } - } - - public void verifyJobSpec(JobSpec spec) { - - for (BuildableJob job : spec.getJobs()) { - if (jobManager.isJobPending(job.detail.name)) { - throw new EntityCreationError("The job " + job.detail.name + " is already pending"); - } - } - - try { - ShowEntity s = adminManager.findShowEntity(spec.getShow()); - if (!s.active) { - throw new EntityCreationError( - "The " + spec.getShow() + " show has been deactivated. Please contact " - + "administrator of your OpenCue deployment to reactivate " + "this show."); - } - } catch (EmptyResultDataAccessException e) { - throw new EntityCreationError("The " + spec.getShow() + " does not exist. Please contact " - + "administrator of your OpenCue deployment to have this show " + "created."); - } - } - - public void queueAndLaunch(final JobSpec spec) { - verifyJobSpec(spec); - launchQueue.execute(new DispatchLaunchJob(spec, this)); - } - - public EmailSupport getEmailSupport() { - return emailSupport; - } - - public void setEmailSupport(EmailSupport emailSupport) { - this.emailSupport = emailSupport; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public AdminManager getAdminManager() { - return adminManager; - } - - public void setAdminManager(AdminManager adminManager) { - this.adminManager = adminManager; - } - - public ThreadPoolTaskExecutor getLaunchQueue() { - return launchQueue; - } - - public void setLaunchQueue(ThreadPoolTaskExecutor launchQueue) { - this.launchQueue = launchQueue; - } - - public JmsMover getJmsMover() { - return jmsMover; - } - - public void setJmsMover(JmsMover jmsMover) { - this.jmsMover = jmsMover; - } - - public LocalBookingSupport getLocalBookingSupport() { - return localBookingSupport; - } - - public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { - this.localBookingSupport = localBookingSupport; - } + } + + public void queueAndLaunch(final JobSpec spec) { + verifyJobSpec(spec); + launchQueue.execute(new DispatchLaunchJob(spec, this)); + } + + public EmailSupport getEmailSupport() { + return emailSupport; + } + + public void setEmailSupport(EmailSupport emailSupport) { + this.emailSupport = emailSupport; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public AdminManager getAdminManager() { + return adminManager; + } + + public void setAdminManager(AdminManager adminManager) { + this.adminManager = adminManager; + } + + public ThreadPoolTaskExecutor getLaunchQueue() { + 
return launchQueue; + } + + public void setLaunchQueue(ThreadPoolTaskExecutor launchQueue) { + this.launchQueue = launchQueue; + } + + public JmsMover getJmsMover() { + return jmsMover; + } + + public void setJmsMover(JmsMover jmsMover) { + this.jmsMover = jmsMover; + } + + public LocalBookingSupport getLocalBookingSupport() { + return localBookingSupport; + } + + public void setLocalBookingSupport(LocalBookingSupport localBookingSupport) { + this.localBookingSupport = localBookingSupport; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java index 077a08638..4641b8e82 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java @@ -43,451 +43,453 @@ */ public interface JobManager { - /** - * Pause/unpause a job - * - * @param job - * @param paused - */ - void setJobPaused(JobInterface job, boolean paused); - - /** - * - * @param id - * @return - */ - public DispatchJob getDispatchJob(String id); - - /** - * - * @param id - * @return - */ - public DispatchFrame getDispatchFrame(String id); - - /** - * Returns true if there is a pending job with the specifed name on the cue. - * - * @param name - * @return - */ - boolean isJobPending(String name); - - /** - * Returns true if the job has no more frames that can possibly be dispatched. - * - * @return - */ - boolean isJobComplete(JobInterface job); - - /** - * Returns true if the layer is complete. - * - * @param layer - * @return - */ - boolean isLayerComplete(LayerInterface layer); - - /** - * Launches a job spec. - * - * @param spec - */ - void launchJobSpec(JobSpec spec); - - /** - * Creates a new job entry - * - * @param BuildableJob job - * @return JobDetail - */ - JobDetail createJob(BuildableJob job); - - /** - * Removes an existing job entry. The job must be in the Finished state before it can be removed. - * - * @param JobDetail job - */ - void removeJob(JobInterface job); - - /** - * Shutting down a job will signal RQD to kill all frames and drop all dependencies for specified - * job. Job is put into Shutdown state which should be commited before any other operations are - * done on the job. When shutdown is complete, the job shoud be marked Finished. - * - * @param JobDetail job - */ - boolean shutdownJob(JobInterface job); - - /** - * Finds and active job by name. - * - * @param String name - * @return JobDetail - */ - JobDetail findJobDetail(String name); - - /** - * Finds and active job by name. - * - * @param String name - * @return JobDetail - */ - JobInterface findJob(String name); - - /** - * Gets an active job by ID. - * - * @param String id - * @return JobDetail - */ - JobDetail getJobDetail(String id); - - /** - * Gets a job by unique id - * - * @param id - * @return - */ - JobInterface getJob(String id); - - /** - * - * @param id - * @return LayerDetail - */ - LayerDetail getLayerDetail(String id); - - /** - * Return a layer by its unique ID. - * - * @param id - * @return LayerDetail - */ - LayerInterface getLayer(String id); - - /** - * - * @param id - * @return FrameDetail - */ - FrameDetail getFrameDetail(String id); - - /** - * Return a frame with the given ID. - * - * @param id - * @return - */ - FrameInterface getFrame(String id); - - /** - * Marks a specific frame as waiting, setting its dependency count to 0 in the process even though - * it has active dependencies. 
- * - * @param frame - */ - public void markFrameAsWaiting(FrameInterface frame); - - /** - * Marks a specific frame as Depend if the frame has active dependencies. This will pretty much - * undo a markFrameAsWaiting. If the frame has no active depends this call should have no effect. - * - * @param frame - */ - public void markFrameAsDepend(FrameInterface frame); - - /** - * Return the result of the given FrameSearch. - * - * @param job - * @param r - * @return - */ - public List findFrames(FrameSearchInterface r); - - /** - * Updates specified frame to new state. - * - * @param frame - * @param state - */ - public void updateFrameState(FrameInterface frame, FrameState state); - - /** - * Updates specified job to new state. - * - * @param job - * @param state - */ - public void updateJobState(JobInterface job, JobState state); - - /** - * Reorders the specified layer. - * - * @param job - * @param frameSet - */ - public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order); - - /** - * - * @param layer - * @param frameSet - */ - public void staggerLayer(LayerInterface layer, String range, int stagger); - - /** - * Returns all of the layers for the specified job - * - * @param job - * @return - */ - public List getLayers(JobInterface job); - - /** - * Returns all of the layers for the specified job - * - * @param job - * @return - */ - public List getLayerDetails(JobInterface job); - - /** - * Creates the job log directory. The JobDetail object must have the logDir property populated. - * - * @param newJob - */ - public void createJobLogDirectory(JobDetail newJob); - - /** - * Optimizes layer settings based on the specified maxRss and run time. - * - * @param layer - * @param maxRss - * @param runTime - */ - void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime); - - /** - * Return true if the given job is booked greater than min cores. - * - * @param job - * @return - */ - boolean isOverMinCores(JobInterface job); - - /** - * Increase the layer memory requirement to given KB value. - * - * @param layer - * @param memKb - */ - void increaseLayerMemoryRequirement(LayerInterface layer, long memKb); - - /** - * Appends a tag to a layer's existing tags. - * - * @param layer - * @param tag - */ - void appendLayerTag(LayerInterface layer, String tag); - - /** - * Replace all existing tags with the specified tag. - * - * @param layer - * @param tag - */ - void setLayerTag(LayerInterface layer, String tag); - - /** - * Return true if the given layer is threadable. - * - * @param layer - * @return - */ - boolean isLayerThreadable(LayerInterface layer); - - /** - * Enable or disable the layer memory optimizer. - */ - void enableMemoryOptimizer(LayerInterface layer, boolean state); - - /** - * Return the frame for the given layer and frame number. 
- * - * @param layer - * @param number - * @return - */ - FrameInterface findFrame(LayerInterface layer, int number); - - /** - * - * @param job - * @return - */ - FrameDetail findLongestFrame(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameDetail findShortestFrame(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameStateTotals getFrameStateTotals(JobInterface job); - - /** - * - * @param job - * @return - */ - ExecutionSummary getExecutionSummary(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameDetail findHighestMemoryFrame(JobInterface job); - - /** - * - * @param job - * @return - */ - FrameDetail findLowestMemoryFrame(JobInterface job); - - /** - * Return the frame state totals by layer. - * - * @param layer - * @return - */ - FrameStateTotals getFrameStateTotals(LayerInterface layer); - - /** - * Return the execution summary by layer. - * - * @param layer - * @return - */ - ExecutionSummary getExecutionSummary(LayerInterface layer); - - /** - * Update the checkpoint state for the given frame. - * - * @param frame - * @param state - */ - void updateCheckpointState(FrameInterface frame, CheckpointState state); - - /** - * Return a list of frames that failed to checkpoint within the given checkpoint point. - * - * @param cutoffTimeMs - * @return - */ - List getStaleCheckpoints(int cutoffTimeSec); - - /** - * Return a list of registered layer outputs. - * - * @param layer - * @return - */ - List getLayerOutputs(LayerInterface layer); - - /** - * Register layer output. - * - * @param layer - * @return - */ - void registerLayerOutput(LayerInterface layer, String filespec); - - /** - * Return thread stats for the given layer. - * - * @param layer - * @return - */ - List getThreadStats(LayerInterface layer); - - /** - * Update the max core value for the given layer. - * - * @param layer - * @param coreUnits - */ - void setLayerMaxCores(LayerInterface layer, int coreUnits); - - /** - * Update the min core value for the given layer. - * - * @param layer - * @param coreUnits - */ - void setLayerMinCores(LayerInterface layer, int coreUnits); - - /** - * Update the max gpu value for the given layer. - * - * @param layer - * @param gpuUnits - */ - void setLayerMaxGpus(LayerInterface layer, int gpuUnits); - - /** - * Update the min gpu value for the given layer. - * - * @param layer - * @param gpuUnits - */ - void setLayerMinGpus(LayerInterface layer, int gpuUnits); - - /** - * Add a limit to the given layer. - * - * @param layer - * @param limitId - */ - void addLayerLimit(LayerInterface layer, String limitId); - - /** - * Remove a limit from the given layer. - * - * @param layer - * @param limitId - */ - void dropLayerLimit(LayerInterface layer, String limitId); - - /** - * Return a list of limits for the given layer. - * - * @param layer - */ - List getLayerLimits(LayerInterface layer); - - /** - * Update email(s) of subscribers for job - * - * @param job - * @param email - */ - void updateEmail(JobInterface job, String email); - - /** - * Return a list of limits for the given layer. - * - * @param job - */ - String getEmail(JobInterface job); + /** + * Pause/unpause a job + * + * @param job + * @param paused + */ + void setJobPaused(JobInterface job, boolean paused); + + /** + * + * @param id + * @return + */ + public DispatchJob getDispatchJob(String id); + + /** + * + * @param id + * @return + */ + public DispatchFrame getDispatchFrame(String id); + + /** + * Returns true if there is a pending job with the specifed name on the cue. 
+ * + * @param name + * @return + */ + boolean isJobPending(String name); + + /** + * Returns true if the job has no more frames that can possibly be dispatched. + * + * @return + */ + boolean isJobComplete(JobInterface job); + + /** + * Returns true if the layer is complete. + * + * @param layer + * @return + */ + boolean isLayerComplete(LayerInterface layer); + + /** + * Launches a job spec. + * + * @param spec + */ + void launchJobSpec(JobSpec spec); + + /** + * Creates a new job entry + * + * @param BuildableJob job + * @return JobDetail + */ + JobDetail createJob(BuildableJob job); + + /** + * Removes an existing job entry. The job must be in the Finished state before it can be + * removed. + * + * @param JobDetail job + */ + void removeJob(JobInterface job); + + /** + * Shutting down a job will signal RQD to kill all frames and drop all dependencies for + * specified job. Job is put into Shutdown state which should be commited before any other + * operations are done on the job. When shutdown is complete, the job shoud be marked Finished. + * + * @param JobDetail job + */ + boolean shutdownJob(JobInterface job); + + /** + * Finds and active job by name. + * + * @param String name + * @return JobDetail + */ + JobDetail findJobDetail(String name); + + /** + * Finds and active job by name. + * + * @param String name + * @return JobDetail + */ + JobInterface findJob(String name); + + /** + * Gets an active job by ID. + * + * @param String id + * @return JobDetail + */ + JobDetail getJobDetail(String id); + + /** + * Gets a job by unique id + * + * @param id + * @return + */ + JobInterface getJob(String id); + + /** + * + * @param id + * @return LayerDetail + */ + LayerDetail getLayerDetail(String id); + + /** + * Return a layer by its unique ID. + * + * @param id + * @return LayerDetail + */ + LayerInterface getLayer(String id); + + /** + * + * @param id + * @return FrameDetail + */ + FrameDetail getFrameDetail(String id); + + /** + * Return a frame with the given ID. + * + * @param id + * @return + */ + FrameInterface getFrame(String id); + + /** + * Marks a specific frame as waiting, setting its dependency count to 0 in the process even + * though it has active dependencies. + * + * @param frame + */ + public void markFrameAsWaiting(FrameInterface frame); + + /** + * Marks a specific frame as Depend if the frame has active dependencies. This will pretty much + * undo a markFrameAsWaiting. If the frame has no active depends this call should have no + * effect. + * + * @param frame + */ + public void markFrameAsDepend(FrameInterface frame); + + /** + * Return the result of the given FrameSearch. + * + * @param job + * @param r + * @return + */ + public List findFrames(FrameSearchInterface r); + + /** + * Updates specified frame to new state. + * + * @param frame + * @param state + */ + public void updateFrameState(FrameInterface frame, FrameState state); + + /** + * Updates specified job to new state. + * + * @param job + * @param state + */ + public void updateJobState(JobInterface job, JobState state); + + /** + * Reorders the specified layer. 
+ * + * @param job + * @param frameSet + */ + public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order); + + /** + * + * @param layer + * @param frameSet + */ + public void staggerLayer(LayerInterface layer, String range, int stagger); + + /** + * Returns all of the layers for the specified job + * + * @param job + * @return + */ + public List getLayers(JobInterface job); + + /** + * Returns all of the layers for the specified job + * + * @param job + * @return + */ + public List getLayerDetails(JobInterface job); + + /** + * Creates the job log directory. The JobDetail object must have the logDir property populated. + * + * @param newJob + */ + public void createJobLogDirectory(JobDetail newJob); + + /** + * Optimizes layer settings based on the specified maxRss and run time. + * + * @param layer + * @param maxRss + * @param runTime + */ + void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime); + + /** + * Return true if the given job is booked greater than min cores. + * + * @param job + * @return + */ + boolean isOverMinCores(JobInterface job); + + /** + * Increase the layer memory requirement to given KB value. + * + * @param layer + * @param memKb + */ + void increaseLayerMemoryRequirement(LayerInterface layer, long memKb); + + /** + * Appends a tag to a layer's existing tags. + * + * @param layer + * @param tag + */ + void appendLayerTag(LayerInterface layer, String tag); + + /** + * Replace all existing tags with the specified tag. + * + * @param layer + * @param tag + */ + void setLayerTag(LayerInterface layer, String tag); + + /** + * Return true if the given layer is threadable. + * + * @param layer + * @return + */ + boolean isLayerThreadable(LayerInterface layer); + + /** + * Enable or disable the layer memory optimizer. + */ + void enableMemoryOptimizer(LayerInterface layer, boolean state); + + /** + * Return the frame for the given layer and frame number. + * + * @param layer + * @param number + * @return + */ + FrameInterface findFrame(LayerInterface layer, int number); + + /** + * + * @param job + * @return + */ + FrameDetail findLongestFrame(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameDetail findShortestFrame(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameStateTotals getFrameStateTotals(JobInterface job); + + /** + * + * @param job + * @return + */ + ExecutionSummary getExecutionSummary(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameDetail findHighestMemoryFrame(JobInterface job); + + /** + * + * @param job + * @return + */ + FrameDetail findLowestMemoryFrame(JobInterface job); + + /** + * Return the frame state totals by layer. + * + * @param layer + * @return + */ + FrameStateTotals getFrameStateTotals(LayerInterface layer); + + /** + * Return the execution summary by layer. + * + * @param layer + * @return + */ + ExecutionSummary getExecutionSummary(LayerInterface layer); + + /** + * Update the checkpoint state for the given frame. + * + * @param frame + * @param state + */ + void updateCheckpointState(FrameInterface frame, CheckpointState state); + + /** + * Return a list of frames that failed to checkpoint within the given checkpoint point. + * + * @param cutoffTimeMs + * @return + */ + List getStaleCheckpoints(int cutoffTimeSec); + + /** + * Return a list of registered layer outputs. + * + * @param layer + * @return + */ + List getLayerOutputs(LayerInterface layer); + + /** + * Register layer output. 
+ * + * @param layer + * @return + */ + void registerLayerOutput(LayerInterface layer, String filespec); + + /** + * Return thread stats for the given layer. + * + * @param layer + * @return + */ + List getThreadStats(LayerInterface layer); + + /** + * Update the max core value for the given layer. + * + * @param layer + * @param coreUnits + */ + void setLayerMaxCores(LayerInterface layer, int coreUnits); + + /** + * Update the min core value for the given layer. + * + * @param layer + * @param coreUnits + */ + void setLayerMinCores(LayerInterface layer, int coreUnits); + + /** + * Update the max gpu value for the given layer. + * + * @param layer + * @param gpuUnits + */ + void setLayerMaxGpus(LayerInterface layer, int gpuUnits); + + /** + * Update the min gpu value for the given layer. + * + * @param layer + * @param gpuUnits + */ + void setLayerMinGpus(LayerInterface layer, int gpuUnits); + + /** + * Add a limit to the given layer. + * + * @param layer + * @param limitId + */ + void addLayerLimit(LayerInterface layer, String limitId); + + /** + * Remove a limit from the given layer. + * + * @param layer + * @param limitId + */ + void dropLayerLimit(LayerInterface layer, String limitId); + + /** + * Return a list of limits for the given layer. + * + * @param layer + */ + List getLayerLimits(LayerInterface layer); + + /** + * Update email(s) of subscribers for job + * + * @param job + * @param email + */ + void updateEmail(JobInterface job, String email); + + /** + * Return a list of limits for the given layer. + * + * @param job + */ + String getEmail(JobInterface job); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java index becadc0ec..03bc765b4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java @@ -67,589 +67,593 @@ @Transactional public class JobManagerService implements JobManager { - private static final Logger logger = LogManager.getLogger(JobManagerService.class); - - private JobDao jobDao; - private ShowDao showDao; - private FrameDao frameDao; - private LayerDao layerDao; - private LimitDao limitDao; - private HostDao hostDao; - private DependManager dependManager; - private FilterManager filterManager; - private GroupDao groupDao; - private FacilityDao facilityDao; - private JobLogUtil jobLogUtil; - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isJobComplete(JobInterface job) { - return jobDao.isJobComplete(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isLayerComplete(LayerInterface layer) { - return layerDao.isLayerComplete(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isLayerThreadable(LayerInterface layer) { - return layerDao.isThreadable(layer); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isJobPending(String name) { - return jobDao.exists(name); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void removeJob(JobInterface job) { - jobDao.deleteJob(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public JobDetail getJobDetail(String id) { - return jobDao.getJobDetail(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public JobInterface getJob(String id) { - 
return jobDao.getJob(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public JobDetail findJobDetail(String name) { - return jobDao.findJobDetail(name); - } - - @Transactional(propagation = Propagation.REQUIRED) - public JobInterface findJob(String name) { - return jobDao.findJob(name); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public boolean isOverMinCores(JobInterface job) { - return jobDao.isOverMinCores(job); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DispatchJob getDispatchJob(String id) { - return jobDao.getDispatchJob(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameInterface getFrame(String id) { - return frameDao.getFrame(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameInterface findFrame(LayerInterface layer, int number) { - return frameDao.findFrame(layer, number); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public DispatchFrame getDispatchFrame(String id) { - return frameDao.getDispatchFrame(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public JobDetail findLastJob(String name) { - return jobDao.findLastJob(name); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void setJobPaused(JobInterface job, boolean paused) { - jobDao.updatePaused(job, paused); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void launchJobSpec(JobSpec spec) { - - for (BuildableJob job : spec.getJobs()) { - - JobDetail d = createJob(job); - if (job.maxCoresOverride != null) { - jobDao.updateMaxCores(d, Convert.coresToWholeCoreUnits(job.maxCoresOverride.intValue())); - } - if (job.maxGpusOverride != null) { - jobDao.updateMaxGpus(d, job.maxGpusOverride.intValue()); - } - if (job.getPostJob() != null) { - BuildableJob postJob = job.getPostJob(); - postJob.env.put("CUE_PARENT_JOB_ID", d.id); - postJob.env.put("CUE_PARENT_JOB", d.name); - createJob(postJob); - jobDao.mapPostJob(job); - } - } - - for (BuildableDependency dep : spec.getDepends()) { - dep.setLaunchDepend(true); - dependManager.createDepend(dep); - } - - for (BuildableJob job : spec.getJobs()) { - jobDao.activateJob(job.detail, JobState.PENDING); - if (job.getPostJob() != null) { - jobDao.activateJob(job.getPostJob().detail, JobState.POSTED); - } - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public JobDetail createJob(BuildableJob buildableJob) { - - logger.info("creating new job: " + buildableJob.detail.name); - long startTime = System.currentTimeMillis(); - - if (jobDao.exists(buildableJob.detail.name)) { - throw new JobLaunchException( - "error launching job, active job already exists: " + buildableJob.detail.name); - } - - if (buildableJob.getBuildableLayers().size() < 1) { - throw new JobLaunchException("error launching job, there were no layers defined!"); - } - - JobDetail job = buildableJob.detail; - - try { - /* - * Get the last job with the same name and try to use the memory settings for that job. Do - * this before inserting the new job we'll find this job as the last job. - */ - JobDetail lastJob = null; - try { - lastJob = findLastJob(job.name); - logger.info("Last job " + job.name + " was found as " + lastJob.name); - } catch (Exception e) { - logger.info("Last job " + job.name + " was NOT found"); - // don't have another version of the job in the DB. 
- } - - ShowEntity show = showDao.findShowDetail(job.showName); - if (!job.isPaused) { - job.isPaused = show.paused; - } - - job.showId = show.id; - job.logDir = job.name; - - /* - * The job gets inserted into the root group and unknown department. - */ - GroupDetail rootGroup = groupDao.getRootGroupDetail(job); - job.groupId = rootGroup.id; - job.deptId = rootGroup.deptId; - - resolveFacility(job); - - jobDao.insertJob(job, jobLogUtil); - jobDao.insertEnvironment(job, buildableJob.env); - - for (BuildableLayer buildableLayer : buildableJob.getBuildableLayers()) { - - LayerDetail layer = buildableLayer.layerDetail; - layer.jobId = job.id; - layer.showId = show.id; - - /** Not accurate anymore */ - List frames = CueUtil.normalizeFrameRange(layer.range, layer.chunkSize); - layer.totalFrameCount = frames.size(); + private static final Logger logger = LogManager.getLogger(JobManagerService.class); + + private JobDao jobDao; + private ShowDao showDao; + private FrameDao frameDao; + private LayerDao layerDao; + private LimitDao limitDao; + private HostDao hostDao; + private DependManager dependManager; + private FilterManager filterManager; + private GroupDao groupDao; + private FacilityDao facilityDao; + private JobLogUtil jobLogUtil; + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobComplete(JobInterface job) { + return jobDao.isJobComplete(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isLayerComplete(LayerInterface layer) { + return layerDao.isLayerComplete(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isLayerThreadable(LayerInterface layer) { + return layerDao.isThreadable(layer); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isJobPending(String name) { + return jobDao.exists(name); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void removeJob(JobInterface job) { + jobDao.deleteJob(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public JobDetail getJobDetail(String id) { + return jobDao.getJobDetail(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public JobInterface getJob(String id) { + return jobDao.getJob(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public JobDetail findJobDetail(String name) { + return jobDao.findJobDetail(name); + } + + @Transactional(propagation = Propagation.REQUIRED) + public JobInterface findJob(String name) { + return jobDao.findJob(name); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public boolean isOverMinCores(JobInterface job) { + return jobDao.isOverMinCores(job); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchJob getDispatchJob(String id) { + return jobDao.getDispatchJob(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameInterface getFrame(String id) { + return frameDao.getFrame(id); + } - if (lastJob != null && !buildableLayer.isMemoryOverride) { - long pastMaxRSS = layerDao.findPastMaxRSS(lastJob, layer.name); - if (pastMaxRSS > 0) { - logger.info("found new maxRSS for layer: " + layer.name + " " + pastMaxRSS); - layer.minimumMemory = pastMaxRSS; - } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameInterface findFrame(LayerInterface layer, int 
number) { + return frameDao.findFrame(layer, number); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public DispatchFrame getDispatchFrame(String id) { + return frameDao.getDispatchFrame(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public JobDetail findLastJob(String name) { + return jobDao.findLastJob(name); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void setJobPaused(JobInterface job, boolean paused) { + jobDao.updatePaused(job, paused); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void launchJobSpec(JobSpec spec) { + + for (BuildableJob job : spec.getJobs()) { + + JobDetail d = createJob(job); + if (job.maxCoresOverride != null) { + jobDao.updateMaxCores(d, + Convert.coresToWholeCoreUnits(job.maxCoresOverride.intValue())); + } + if (job.maxGpusOverride != null) { + jobDao.updateMaxGpus(d, job.maxGpusOverride.intValue()); + } + if (job.getPostJob() != null) { + BuildableJob postJob = job.getPostJob(); + postJob.env.put("CUE_PARENT_JOB_ID", d.id); + postJob.env.put("CUE_PARENT_JOB", d.name); + createJob(postJob); + jobDao.mapPostJob(job); + } + } + + for (BuildableDependency dep : spec.getDepends()) { + dep.setLaunchDepend(true); + dependManager.createDepend(dep); + } + + for (BuildableJob job : spec.getJobs()) { + jobDao.activateJob(job.detail, JobState.PENDING); + if (job.getPostJob() != null) { + jobDao.activateJob(job.getPostJob().detail, JobState.POSTED); + } + } + } + + @Transactional(propagation = Propagation.REQUIRED) + public JobDetail createJob(BuildableJob buildableJob) { + + logger.info("creating new job: " + buildableJob.detail.name); + long startTime = System.currentTimeMillis(); + + if (jobDao.exists(buildableJob.detail.name)) { + throw new JobLaunchException( + "error launching job, active job already exists: " + buildableJob.detail.name); } - if (layer.minimumCores > 0 && layer.minimumCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { - layer.minimumCores = Dispatcher.CORE_POINTS_RESERVED_MIN; + if (buildableJob.getBuildableLayers().size() < 1) { + throw new JobLaunchException("error launching job, there were no layers defined!"); + } + + JobDetail job = buildableJob.detail; + + try { + /* + * Get the last job with the same name and try to use the memory settings for that job. + * Do this before inserting the new job we'll find this job as the last job. + */ + JobDetail lastJob = null; + try { + lastJob = findLastJob(job.name); + logger.info("Last job " + job.name + " was found as " + lastJob.name); + } catch (Exception e) { + logger.info("Last job " + job.name + " was NOT found"); + // don't have another version of the job in the DB. + } + + ShowEntity show = showDao.findShowDetail(job.showName); + if (!job.isPaused) { + job.isPaused = show.paused; + } + + job.showId = show.id; + job.logDir = job.name; + + /* + * The job gets inserted into the root group and unknown department. 
+ */ + GroupDetail rootGroup = groupDao.getRootGroupDetail(job); + job.groupId = rootGroup.id; + job.deptId = rootGroup.deptId; + + resolveFacility(job); + + jobDao.insertJob(job, jobLogUtil); + jobDao.insertEnvironment(job, buildableJob.env); + + for (BuildableLayer buildableLayer : buildableJob.getBuildableLayers()) { + + LayerDetail layer = buildableLayer.layerDetail; + layer.jobId = job.id; + layer.showId = show.id; + + /** Not accurate anymore */ + List frames = CueUtil.normalizeFrameRange(layer.range, layer.chunkSize); + layer.totalFrameCount = frames.size(); + + if (lastJob != null && !buildableLayer.isMemoryOverride) { + long pastMaxRSS = layerDao.findPastMaxRSS(lastJob, layer.name); + if (pastMaxRSS > 0) { + logger.info("found new maxRSS for layer: " + layer.name + " " + pastMaxRSS); + layer.minimumMemory = pastMaxRSS; + } + } + + if (layer.minimumCores > 0 + && layer.minimumCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { + layer.minimumCores = Dispatcher.CORE_POINTS_RESERVED_MIN; + } + + logger.info("creating layer " + layer.name + " range: " + layer.range); + layerDao.insertLayerDetail(layer); + layerDao.insertLayerEnvironment(layer, buildableLayer.env); + layer.limits.stream() + .forEach(ln -> addLayerLimit(layer, limitDao.findLimit(ln).getLimitId())); + layer.outputs.stream().forEach(ln -> registerLayerOutput(layer, ln)); + frameDao.insertFrames(layer, frames); + } + + // The priority of a job is set on it's resource entry. + // To update it we set the priority after it's been inserted. + jobDao.updatePriority(job, job.priority); + + /* + * Finally, run any filters on the job which may set the job's priority. + */ + filterManager.runFiltersOnJob(job); + + CueUtil.logDuration(startTime, "created job " + job.getName() + " " + job.getId()); + return job; + + } catch (Exception e) { + logger.info("error launching job: " + job.name + "," + e); + throw new JobLaunchException("error launching job: " + job.name + "," + e, e); + } + } + + private void resolveFacility(JobDetail job) { + try { + if (job.facilityName == null) { + job.facilityId = facilityDao.getDefaultFacility().getId(); + } else { + job.facilityId = facilityDao.getFacility(job.facilityName).getId(); + } + } catch (Exception e) { + throw new EntityRetrievalException("failed to find facility: " + job.facilityName, e); } + } + + @Transactional(propagation = Propagation.REQUIRED) + public boolean shutdownJob(JobInterface job) { + // See JobManagerSupport + if (jobDao.updateJobFinished(job)) { + logger.info("shutting down job: " + job.getName()); + jobDao.activatePostJob(job); + logger.info("activating post jobs"); + return true; + } + return false; + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List findFrames(FrameSearchInterface r) { + return frameDao.findFrames(r); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void updateFrameState(FrameInterface frame, FrameState state) { + frameDao.updateFrameState(frame, state); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LayerDetail getLayerDetail(String id) { + return layerDao.getLayerDetail(id); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public LayerInterface getLayer(String id) { + return layerDao.getLayer(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void updateJobState(JobInterface job, JobState state) { + jobDao.updateState(job, state); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public 
FrameDetail getFrameDetail(String id) { + return frameDao.getFrameDetail(id); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void markFrameAsWaiting(FrameInterface frame) { + frameDao.markFrameAsWaiting(frame); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void markFrameAsDepend(FrameInterface frame) { + frameDao.markFrameAsDepend(frame); + } + + /** + * Creates a new job log directory. This is only called when launching a job. + * + * @param job + */ + @Transactional(propagation = Propagation.NEVER) + public void createJobLogDirectory(JobDetail job) { + if (!jobLogUtil.createJobLogDirectory(job.logDir)) { + throw new JobLaunchException("error launching job, unable to create log directory"); + } + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getLayers(JobInterface job) { + return layerDao.getLayers(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void increaseLayerMemoryRequirement(LayerInterface layer, long memKb) { + layerDao.increaseLayerMinMemory(layer, memKb); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { + switch (order) { + case FIRST: + frameDao.reorderFramesFirst(layer, frameSet); + break; + case LAST: + frameDao.reorderFramesLast(layer, frameSet); + break; + case REVERSE: + frameDao.reorderLayerReverse(layer, frameSet); + break; + } + } + + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void staggerLayer(LayerInterface layer, String range, int stagger) { + frameDao.staggerLayer(layer, range, stagger); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getLayerDetails(JobInterface job) { + return layerDao.getLayerDetails(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getThreadStats(LayerInterface layer) { + return layerDao.getThreadStats(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime) { + layerDao.balanceLayerMinMemory(layer, maxRss); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED) + public void enableMemoryOptimizer(LayerInterface layer, boolean state) { + layerDao.enableMemoryOptimizer(layer, state); + } + + @Override + public void appendLayerTag(LayerInterface layer, String tag) { + layerDao.appendLayerTags(layer, tag); + } + + @Override + public void setLayerTag(LayerInterface layer, String tag) { + layerDao.updateLayerTags(layer, Sets.newHashSet(tag)); + } - logger.info("creating layer " + layer.name + " range: " + layer.range); - layerDao.insertLayerDetail(layer); - layerDao.insertLayerEnvironment(layer, buildableLayer.env); - layer.limits.stream() - .forEach(ln -> addLayerLimit(layer, limitDao.findLimit(ln).getLimitId())); - layer.outputs.stream().forEach(ln -> registerLayerOutput(layer, ln)); - frameDao.insertFrames(layer, frames); - } - - // The priority of a job is set on it's resource entry. - // To update it we set the priority after it's been inserted. - jobDao.updatePriority(job, job.priority); - - /* - * Finally, run any filters on the job which may set the job's priority. 
- */ - filterManager.runFiltersOnJob(job); - - CueUtil.logDuration(startTime, "created job " + job.getName() + " " + job.getId()); - return job; - - } catch (Exception e) { - logger.info("error launching job: " + job.name + "," + e); - throw new JobLaunchException("error launching job: " + job.name + "," + e, e); - } - } - - private void resolveFacility(JobDetail job) { - try { - if (job.facilityName == null) { - job.facilityId = facilityDao.getDefaultFacility().getId(); - } else { - job.facilityId = facilityDao.getFacility(job.facilityName).getId(); - } - } catch (Exception e) { - throw new EntityRetrievalException("failed to find facility: " + job.facilityName, e); - } - } - - @Transactional(propagation = Propagation.REQUIRED) - public boolean shutdownJob(JobInterface job) { - // See JobManagerSupport - if (jobDao.updateJobFinished(job)) { - logger.info("shutting down job: " + job.getName()); - jobDao.activatePostJob(job); - logger.info("activating post jobs"); - return true; - } - return false; - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List findFrames(FrameSearchInterface r) { - return frameDao.findFrames(r); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void updateFrameState(FrameInterface frame, FrameState state) { - frameDao.updateFrameState(frame, state); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public LayerDetail getLayerDetail(String id) { - return layerDao.getLayerDetail(id); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public LayerInterface getLayer(String id) { - return layerDao.getLayer(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void updateJobState(JobInterface job, JobState state) { - jobDao.updateState(job, state); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameDetail getFrameDetail(String id) { - return frameDao.getFrameDetail(id); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void markFrameAsWaiting(FrameInterface frame) { - frameDao.markFrameAsWaiting(frame); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void markFrameAsDepend(FrameInterface frame) { - frameDao.markFrameAsDepend(frame); - } - - /** - * Creates a new job log directory. This is only called when launching a job. 
- * - * @param job - */ - @Transactional(propagation = Propagation.NEVER) - public void createJobLogDirectory(JobDetail job) { - if (!jobLogUtil.createJobLogDirectory(job.logDir)) { - throw new JobLaunchException("error launching job, unable to create log directory"); - } - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getLayers(JobInterface job) { - return layerDao.getLayers(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void increaseLayerMemoryRequirement(LayerInterface layer, long memKb) { - layerDao.increaseLayerMinMemory(layer, memKb); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { - switch (order) { - case FIRST: - frameDao.reorderFramesFirst(layer, frameSet); - break; - case LAST: - frameDao.reorderFramesLast(layer, frameSet); - break; - case REVERSE: - frameDao.reorderLayerReverse(layer, frameSet); - break; - } - } - - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void staggerLayer(LayerInterface layer, String range, int stagger) { - frameDao.staggerLayer(layer, range, stagger); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getLayerDetails(JobInterface job) { - return layerDao.getLayerDetails(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getThreadStats(LayerInterface layer) { - return layerDao.getThreadStats(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void optimizeLayer(LayerInterface layer, int cores, long maxRss, int runTime) { - layerDao.balanceLayerMinMemory(layer, maxRss); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void enableMemoryOptimizer(LayerInterface layer, boolean state) { - layerDao.enableMemoryOptimizer(layer, state); - } - - @Override - public void appendLayerTag(LayerInterface layer, String tag) { - layerDao.appendLayerTags(layer, tag); - } - - @Override - public void setLayerTag(LayerInterface layer, String tag) { - layerDao.updateLayerTags(layer, Sets.newHashSet(tag)); - } - - @Override - public void setLayerMinCores(LayerInterface layer, int coreUnits) { - layerDao.updateLayerMinCores(layer, coreUnits); - } - - @Override - public void setLayerMaxCores(LayerInterface layer, int coreUnits) { - layerDao.updateLayerMaxCores(layer, coreUnits); - } - - @Override - public void setLayerMinGpus(LayerInterface layer, int gpu) { - layerDao.updateLayerMinGpus(layer, gpu); - } - - @Override - public void setLayerMaxGpus(LayerInterface layer, int gpu) { - layerDao.updateLayerMaxGpus(layer, gpu); - } - - @Override - public void addLayerLimit(LayerInterface layer, String limitId) { - layerDao.addLimit(layer, limitId); - } - - @Override - public void dropLayerLimit(LayerInterface layer, String limitId) { - layerDao.dropLimit(layer, limitId); - } - - @Override - public List getLayerLimits(LayerInterface layer) { - return layerDao.getLimits(layer); - } - - @Override - public void registerLayerOutput(LayerInterface layer, String filespec) { - try { - layerDao.insertLayerOutput(layer, filespec); - } catch (DataAccessException e) { - // Fail quietly but log it. 
- logger.warn("Failed to add layer output: " + filespec + "," + e); - } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getLayerOutputs(LayerInterface layer) { - return layerDao.getLayerOutputs(layer); - } - - @Override - @Transactional(propagation = Propagation.SUPPORTS) - public void updateCheckpointState(FrameInterface frame, CheckpointState state) { - - if (frameDao.updateFrameCheckpointState(frame, state)) { - logger.info("Checkpoint state of frame " + frame.getId() + " set to " + state.toString()); - } else { - logger.warn("Failed to set checkpoint state of " + frame.getId() + " to " + state.toString()); - } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameDetail findHighestMemoryFrame(JobInterface job) { - return frameDao.findHighestMemoryFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameDetail findLongestFrame(JobInterface job) { - return frameDao.findLongestFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameDetail findLowestMemoryFrame(JobInterface job) { - return frameDao.findLowestMemoryFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameDetail findShortestFrame(JobInterface job) { - return frameDao.findShortestFrame(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ExecutionSummary getExecutionSummary(JobInterface job) { - return jobDao.getExecutionSummary(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameStateTotals getFrameStateTotals(JobInterface job) { - return jobDao.getFrameStateTotals(job); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ExecutionSummary getExecutionSummary(LayerInterface layer) { - return layerDao.getExecutionSummary(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public FrameStateTotals getFrameStateTotals(LayerInterface layer) { - return layerDao.getFrameStateTotals(layer); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public List getStaleCheckpoints(int cutoffTimeSec) { - return frameDao.getStaleCheckpoints(cutoffTimeSec); - } - - @Transactional(propagation = Propagation.REQUIRED) - public void updateEmail(JobInterface job, String email) { - jobDao.updateEmail(job, email); - } - - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public String getEmail(JobInterface job) { - return jobDao.getEmail(job); - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public FrameDao getFrameDao() { - return frameDao; - } - - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } - - public LayerDao getLayerDao() { - return layerDao; - } - - public void setLayerDao(LayerDao layerDao) { - this.layerDao = layerDao; - } - - public LimitDao getLimitDao() { - return limitDao; - } - - public void setLimitDao(LimitDao limitDao) { - this.limitDao = limitDao; - } - - public ShowDao getShowDao() { - return showDao; - } - - public void setShowDao(ShowDao showDao) { - this.showDao = showDao; - } - - public JobDao getJobDao() { - return jobDao; - } - - public void 
setJobDao(JobDao workDao) { - this.jobDao = workDao; - } - - public FilterManager getFilterManager() { - return filterManager; - } - - public void setFilterManager(FilterManager filterManager) { - this.filterManager = filterManager; - } - - public GroupDao getGroupDao() { - return groupDao; - } - - public void setGroupDao(GroupDao groupDao) { - this.groupDao = groupDao; - } - - public FacilityDao getFacilityDao() { - return facilityDao; - } - - public void setFacilityDao(FacilityDao facilityDao) { - this.facilityDao = facilityDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } - - public JobLogUtil getJobLogUtil() { - return jobLogUtil; - } - - public void setJobLogUtil(JobLogUtil jobLogUtil) { - this.jobLogUtil = jobLogUtil; - } + @Override + public void setLayerMinCores(LayerInterface layer, int coreUnits) { + layerDao.updateLayerMinCores(layer, coreUnits); + } + + @Override + public void setLayerMaxCores(LayerInterface layer, int coreUnits) { + layerDao.updateLayerMaxCores(layer, coreUnits); + } + + @Override + public void setLayerMinGpus(LayerInterface layer, int gpu) { + layerDao.updateLayerMinGpus(layer, gpu); + } + + @Override + public void setLayerMaxGpus(LayerInterface layer, int gpu) { + layerDao.updateLayerMaxGpus(layer, gpu); + } + + @Override + public void addLayerLimit(LayerInterface layer, String limitId) { + layerDao.addLimit(layer, limitId); + } + + @Override + public void dropLayerLimit(LayerInterface layer, String limitId) { + layerDao.dropLimit(layer, limitId); + } + + @Override + public List getLayerLimits(LayerInterface layer) { + return layerDao.getLimits(layer); + } + + @Override + public void registerLayerOutput(LayerInterface layer, String filespec) { + try { + layerDao.insertLayerOutput(layer, filespec); + } catch (DataAccessException e) { + // Fail quietly but log it. 
+ logger.warn("Failed to add layer output: " + filespec + "," + e); + } + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getLayerOutputs(LayerInterface layer) { + return layerDao.getLayerOutputs(layer); + } + + @Override + @Transactional(propagation = Propagation.SUPPORTS) + public void updateCheckpointState(FrameInterface frame, CheckpointState state) { + + if (frameDao.updateFrameCheckpointState(frame, state)) { + logger.info( + "Checkpoint state of frame " + frame.getId() + " set to " + state.toString()); + } else { + logger.warn("Failed to set checkpoint state of " + frame.getId() + " to " + + state.toString()); + } + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameDetail findHighestMemoryFrame(JobInterface job) { + return frameDao.findHighestMemoryFrame(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameDetail findLongestFrame(JobInterface job) { + return frameDao.findLongestFrame(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameDetail findLowestMemoryFrame(JobInterface job) { + return frameDao.findLowestMemoryFrame(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameDetail findShortestFrame(JobInterface job) { + return frameDao.findShortestFrame(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ExecutionSummary getExecutionSummary(JobInterface job) { + return jobDao.getExecutionSummary(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameStateTotals getFrameStateTotals(JobInterface job) { + return jobDao.getFrameStateTotals(job); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ExecutionSummary getExecutionSummary(LayerInterface layer) { + return layerDao.getExecutionSummary(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public FrameStateTotals getFrameStateTotals(LayerInterface layer) { + return layerDao.getFrameStateTotals(layer); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public List getStaleCheckpoints(int cutoffTimeSec) { + return frameDao.getStaleCheckpoints(cutoffTimeSec); + } + + @Transactional(propagation = Propagation.REQUIRED) + public void updateEmail(JobInterface job, String email) { + jobDao.updateEmail(job, email); + } + + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public String getEmail(JobInterface job) { + return jobDao.getEmail(job); + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public FrameDao getFrameDao() { + return frameDao; + } + + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } + + public LayerDao getLayerDao() { + return layerDao; + } + + public void setLayerDao(LayerDao layerDao) { + this.layerDao = layerDao; + } + + public LimitDao getLimitDao() { + return limitDao; + } + + public void setLimitDao(LimitDao limitDao) { + this.limitDao = limitDao; + } + + public ShowDao getShowDao() { + return showDao; + } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; + } + + public JobDao getJobDao() { + return jobDao; + } + + public 
void setJobDao(JobDao workDao) { + this.jobDao = workDao; + } + + public FilterManager getFilterManager() { + return filterManager; + } + + public void setFilterManager(FilterManager filterManager) { + this.filterManager = filterManager; + } + + public GroupDao getGroupDao() { + return groupDao; + } + + public void setGroupDao(GroupDao groupDao) { + this.groupDao = groupDao; + } + + public FacilityDao getFacilityDao() { + return facilityDao; + } + + public void setFacilityDao(FacilityDao facilityDao) { + this.facilityDao = facilityDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + + public JobLogUtil getJobLogUtil() { + return jobLogUtil; + } + + public void setJobLogUtil(JobLogUtil jobLogUtil) { + this.jobLogUtil = jobLogUtil; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java index 678dc8de7..bb91ad1d8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java @@ -54,540 +54,543 @@ * A non-transaction support class for managing jobs. */ public class JobManagerSupport { - private static final Logger logger = LogManager.getLogger(JobManagerSupport.class); - - private JobManager jobManager; - private DependManager dependManager; - private HostManager hostManager; - private RqdClient rqdClient; - private DepartmentManager departmentManager; - private DispatchSupport dispatchSupport; - private DispatchQueue manageQueue; - private RedirectManager redirectManager; - private EmailSupport emailSupport; - private FrameSearchFactory frameSearchFactory; - - public void queueShutdownJob(JobInterface job, Source source, boolean isManualKill) { - manageQueue.execute(new DispatchJobComplete(job, source, isManualKill, this)); - } - - public boolean shutdownJob(JobInterface job, Source source, boolean isManualKill) { - - if (isManualKill && source.getReason().isEmpty()) { - logger.info(job.getName() + "/" + job.getId() + " **Invalid Job Kill Request** for " - + source.toString()); - } else { - if (jobManager.shutdownJob(job)) { - /* - * Satisfy any dependencies on just the job record, not layers or frames. - */ - satisfyWhatDependsOn(job); + private static final Logger logger = LogManager.getLogger(JobManagerSupport.class); + + private JobManager jobManager; + private DependManager dependManager; + private HostManager hostManager; + private RqdClient rqdClient; + private DepartmentManager departmentManager; + private DispatchSupport dispatchSupport; + private DispatchQueue manageQueue; + private RedirectManager redirectManager; + private EmailSupport emailSupport; + private FrameSearchFactory frameSearchFactory; + + public void queueShutdownJob(JobInterface job, Source source, boolean isManualKill) { + manageQueue.execute(new DispatchJobComplete(job, source, isManualKill, this)); + } + + public boolean shutdownJob(JobInterface job, Source source, boolean isManualKill) { + + if (isManualKill && source.getReason().isEmpty()) { + logger.info(job.getName() + "/" + job.getId() + " **Invalid Job Kill Request** for " + + source.toString()); + } else { + if (jobManager.shutdownJob(job)) { + /* + * Satisfy any dependencies on just the job record, not layers or frames. 
+ */ + satisfyWhatDependsOn(job); + + if (departmentManager.isManaged(job)) { + departmentManager.syncJobsWithTask(job); + } - if (departmentManager.isManaged(job)) { - departmentManager.syncJobsWithTask(job); + if (isManualKill) { + logger.info(job.getName() + "/" + job.getId() + " is being manually killed by " + + source.toString()); + + /** + * Sleep a bit here in case any frames were dispatched during the job shutdown + * process. + */ + try { + Thread.sleep(3000); + } catch (InterruptedException e1) { + logger.info(job.getName() + "/" + job.getId() + + " shutdown thread was interrupted."); + Thread.currentThread().interrupt(); + } + + // Report kill requests to sentry + Sentry.configureScope(scope -> { + scope.setExtra("Job Name", job.getName()); + scope.setExtra("Job ID", job.getId()); + scope.setExtra("Job Details", source.toString()); + scope.setExtra("Kill Reason", source.getReason()); + scope.setTag("job", job.getName()); + Sentry.captureMessage("Kill Request Successful"); + }); + + FrameSearchInterface search = frameSearchFactory.create(job); + FrameSearchCriteria newCriteria = search.getCriteria(); + FrameStateSeq states = newCriteria.getStates().toBuilder() + .addFrameStates(FrameState.RUNNING).build(); + search.setCriteria(newCriteria.toBuilder().setStates(states).build()); + + for (FrameInterface frame : jobManager.findFrames(search)) { + + VirtualProc proc = null; + try { + proc = hostManager.findVirtualProc(frame); + } catch (DataAccessException e) { + logger.warn("Unable to find proc to kill frame " + frame + + " on job shutdown operation, " + e); + } + + if (manualStopFrame(frame, FrameState.WAITING)) { + try { + if (proc != null) { + kill(proc, source); + } + } catch (DataAccessException e) { + logger.warn("Failed to kill frame " + frame + + " on job shutdown operation, " + e); + } catch (Exception e) { + logger.warn("error killing frame: " + frame); + } + } + } + } + + /* + * Send mail after all frames have been stopped or else the email will have + * inaccurate numbers. + */ + emailSupport.sendShutdownEmail(job); + + return true; + } } - if (isManualKill) { - logger.info(job.getName() + "/" + job.getId() + " is being manually killed by " - + source.toString()); - - /** - * Sleep a bit here in case any frames were dispatched during the job shutdown process. 
- */ - try { - Thread.sleep(3000); - } catch (InterruptedException e1) { - logger.info(job.getName() + "/" + job.getId() + " shutdown thread was interrupted."); - Thread.currentThread().interrupt(); - } - - // Report kill requests to sentry - Sentry.configureScope(scope -> { - scope.setExtra("Job Name", job.getName()); - scope.setExtra("Job ID", job.getId()); - scope.setExtra("Job Details", source.toString()); - scope.setExtra("Kill Reason", source.getReason()); - scope.setTag("job", job.getName()); - Sentry.captureMessage("Kill Request Successful"); - }); - - FrameSearchInterface search = frameSearchFactory.create(job); - FrameSearchCriteria newCriteria = search.getCriteria(); - FrameStateSeq states = - newCriteria.getStates().toBuilder().addFrameStates(FrameState.RUNNING).build(); - search.setCriteria(newCriteria.toBuilder().setStates(states).build()); - - for (FrameInterface frame : jobManager.findFrames(search)) { - - VirtualProc proc = null; - try { - proc = hostManager.findVirtualProc(frame); - } catch (DataAccessException e) { - logger.warn("Unable to find proc to kill frame " + frame - + " on job shutdown operation, " + e); + return false; + } + + public void reorderJob(JobInterface job, FrameSet frameSet, Order order) { + List layers = jobManager.getLayers(job); + for (LayerInterface layer : layers) { + jobManager.reorderLayer(layer, frameSet, order); + } + } + + public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { + jobManager.reorderLayer(layer, frameSet, order); + } + + public void staggerJob(JobInterface job, String range, int stagger) { + List layers = jobManager.getLayers(job); + for (LayerInterface layer : layers) { + jobManager.staggerLayer(layer, range, stagger); + } + } + + public void staggerLayer(LayerInterface layer, String range, int stagger) { + jobManager.staggerLayer(layer, range, stagger); + } + + public void satisfyWhatDependsOn(FrameInterface frame) { + List depends = dependManager.getWhatDependsOn(frame); + logger.info("satisfying " + depends.size() + " depends that are waiting on frame " + + frame.getName()); + for (LightweightDependency depend : depends) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(LayerInterface layer) { + List depends = dependManager.getWhatDependsOn(layer); + logger.info("satisfying " + depends.size() + " depends that are waiting on layer " + + layer.getName()); + for (LightweightDependency depend : dependManager.getWhatDependsOn(layer)) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(JobInterface job) { + List depends = dependManager.getWhatDependsOn(job); + logger.info("satisfying " + depends.size() + " depends that are waiting on job " + + job.getName()); + for (LightweightDependency depend : dependManager.getWhatDependsOn(job)) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(JobInterface job, DependTarget target) { + for (LightweightDependency depend : dependManager.getWhatDependsOn(job, target)) { + dependManager.satisfyDepend(depend); + } + } + + public void satisfyWhatDependsOn(FrameSearchInterface request) { + for (FrameInterface frame : jobManager.findFrames(request)) { + for (LightweightDependency depend : dependManager.getWhatDependsOn(frame)) { + dependManager.satisfyDepend(depend); } + } + } - if (manualStopFrame(frame, FrameState.WAITING)) { - try { - if (proc != null) { - kill(proc, source); - } - } catch (DataAccessException e) { - logger.warn("Failed to kill frame " + frame + " 
on job shutdown operation, " + e); - } catch (Exception e) { - logger.warn("error killing frame: " + frame); - } + public boolean isJobComplete(JobInterface job) { + return jobManager.isJobComplete(job); + } + + /* + * Destructive functions require a extra Source argument which contains information about the + * user making the call. This information is propagated down to the frame log file. + * + * There are three main destructive functions. kill, retry, and eat. + * + * Before a frame is retried or eaten, the new frame state must be set and committed to the DB + * before the call to RQD is made to actually kill the frame. This will tell the dispatcher what + * to do with the frame when RQD sends in the FrameCompleteReport. + * + * See RqdReportManagerService.determineFrameState + */ + + /** + * Kill the specified frame. If RQD throws back an exception, the proc is considered lost and is + * manually removed. + * + * @param p + * @param source + */ + public void kill(VirtualProc p, Source source) { + try { + rqdClient.killFrame(p, source.toString()); + } catch (java.lang.Throwable e) { + dispatchSupport.lostProc(p, "clearing due to failed kill," + p.getName() + "," + e, + Dispatcher.EXIT_STATUS_FAILED_KILL); + } + } + + /** + * Kill a list procs. If RQD throws back an exception, the proc is considered lost and is + * manually removed. + * + * @param procs + * @param source + */ + public void kill(Collection procs, Source source) { + for (VirtualProc p : procs) { + try { + rqdClient.killFrame(p, source.toString()); + } catch (java.lang.Throwable e) { + dispatchSupport.lostProc(p, "clearing due to failed kill," + p.getName() + "," + e, + Dispatcher.EXIT_STATUS_FAILED_KILL); } - } } + } + + /** + * Kills a frame. This is a convenience method for when you have a reference to the Frame and + * + * @param frame + * @param source + */ + public void kill(FrameInterface frame, Source source) { + kill(hostManager.findVirtualProc(frame), source); + } - /* - * Send mail after all frames have been stopped or else the email will have inaccurate - * numbers. 
- */ - emailSupport.sendShutdownEmail(job); - - return true; - } - } - - return false; - } - - public void reorderJob(JobInterface job, FrameSet frameSet, Order order) { - List layers = jobManager.getLayers(job); - for (LayerInterface layer : layers) { - jobManager.reorderLayer(layer, frameSet, order); - } - } - - public void reorderLayer(LayerInterface layer, FrameSet frameSet, Order order) { - jobManager.reorderLayer(layer, frameSet, order); - } - - public void staggerJob(JobInterface job, String range, int stagger) { - List layers = jobManager.getLayers(job); - for (LayerInterface layer : layers) { - jobManager.staggerLayer(layer, range, stagger); - } - } - - public void staggerLayer(LayerInterface layer, String range, int stagger) { - jobManager.staggerLayer(layer, range, stagger); - } - - public void satisfyWhatDependsOn(FrameInterface frame) { - List depends = dependManager.getWhatDependsOn(frame); - logger.info( - "satisfying " + depends.size() + " depends that are waiting on frame " + frame.getName()); - for (LightweightDependency depend : depends) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(LayerInterface layer) { - List depends = dependManager.getWhatDependsOn(layer); - logger.info( - "satisfying " + depends.size() + " depends that are waiting on layer " + layer.getName()); - for (LightweightDependency depend : dependManager.getWhatDependsOn(layer)) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(JobInterface job) { - List depends = dependManager.getWhatDependsOn(job); - logger - .info("satisfying " + depends.size() + " depends that are waiting on job " + job.getName()); - for (LightweightDependency depend : dependManager.getWhatDependsOn(job)) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(JobInterface job, DependTarget target) { - for (LightweightDependency depend : dependManager.getWhatDependsOn(job, target)) { - dependManager.satisfyDepend(depend); - } - } - - public void satisfyWhatDependsOn(FrameSearchInterface request) { - for (FrameInterface frame : jobManager.findFrames(request)) { - for (LightweightDependency depend : dependManager.getWhatDependsOn(frame)) { - dependManager.satisfyDepend(depend); - } - } - } - - public boolean isJobComplete(JobInterface job) { - return jobManager.isJobComplete(job); - } - - /* - * Destructive functions require a extra Source argument which contains information about the user - * making the call. This information is propagated down to the frame log file. - * - * There are three main destructive functions. kill, retry, and eat. - * - * Before a frame is retried or eaten, the new frame state must be set and committed to the DB - * before the call to RQD is made to actually kill the frame. This will tell the dispatcher what - * to do with the frame when RQD sends in the FrameCompleteReport. - * - * See RqdReportManagerService.determineFrameState - */ - - /** - * Kill the specified frame. If RQD throws back an exception, the proc is considered lost and is - * manually removed. - * - * @param p - * @param source - */ - public void kill(VirtualProc p, Source source) { - try { - rqdClient.killFrame(p, source.toString()); - } catch (java.lang.Throwable e) { - dispatchSupport.lostProc(p, "clearing due to failed kill," + p.getName() + "," + e, - Dispatcher.EXIT_STATUS_FAILED_KILL); - } - } - - /** - * Kill a list procs. If RQD throws back an exception, the proc is considered lost and is manually - * removed. 
- * - * @param procs - * @param source - */ - public void kill(Collection procs, Source source) { - for (VirtualProc p : procs) { - try { - rqdClient.killFrame(p, source.toString()); - } catch (java.lang.Throwable e) { - dispatchSupport.lostProc(p, "clearing due to failed kill," + p.getName() + "," + e, - Dispatcher.EXIT_STATUS_FAILED_KILL); - } - } - } - - /** - * Kills a frame. This is a convenience method for when you have a reference to the Frame and - * - * @param frame - * @param source - */ - public void kill(FrameInterface frame, Source source) { - kill(hostManager.findVirtualProc(frame), source); - } - - /** - * Unbook and optionally kill all procs that match the specified search criteria. - * - * @param r - * @param killProc - * @param source - * @return - */ - public int unbookProcs(ProcSearchInterface r, boolean killProc, Source source) { - List procs = hostManager.findBookedVirtualProcs(r); - for (VirtualProc proc : procs) { - unbookProc(proc, killProc, source); - } - return procs.size(); - } - - /** - * Unbook and optionally kill all procs that match the specified search criteria. - * - * @param proc - * @param killProc - * @param source - * @return - */ - public void unbookProc(VirtualProc proc, boolean killProc, Source source) { - hostManager.unbookProc(proc); - if (killProc) { - kill(proc, source); - } - } - - /** - * Kill procs and optionally unbook them as well. - * - * @param host - * @param source - * @param unbook - */ - public void killProcs(HostInterface host, Source source, boolean unbook) { - - List procs = hostManager.findVirtualProcs(host); - - if (unbook) { - hostManager.unbookVirtualProcs(procs); - } - - for (VirtualProc proc : procs) { - kill(proc, source); - } - } - - /** - * Kill procs and optionally unbook them as well. - * - * @param r - * @param source - * @param unbook - */ - public void killProcs(FrameSearchInterface r, Source source, boolean unbook) { - - FrameSearchCriteria newCriteria = - r.getCriteria().toBuilder().setStates(FrameStateSeq.newBuilder().build()).build(); - r.setCriteria(newCriteria); - - List procs = hostManager.findVirtualProcs(r); - - if (unbook) { - hostManager.unbookVirtualProcs(procs); - } - - for (VirtualProc proc : procs) { - kill(proc, source); - } - } - - /** - * Kill procs and optionally unbook them as well. - * - * @param job - * @param source - * @param unbook - */ - public void killProcs(JobInterface job, Source source, boolean unbook) { - List procs = hostManager.findVirtualProcs(frameSearchFactory.create(job)); - if (unbook) { - hostManager.unbookVirtualProcs(procs); - } - - for (VirtualProc proc : procs) { - kill(proc, source); - } - } - - /** - * Retry frames that match the specified FrameSearch request. - * - * @param request - * @param source - */ - public void retryFrames(FrameSearchInterface request, Source source) { - for (FrameInterface frame : jobManager.findFrames(request)) { - try { - retryFrame(frame, source); - } catch (Exception e) { - CueExceptionUtil.logStackTrace("Failed to retry frame " + frame + " from source " + source, - e); - } - } - } - - /** - * Retry a single frame. - * - * @param frame - * @param source - */ - public void retryFrame(FrameInterface frame, Source source) { /** - * Have to find the proc before we stop the frame. + * Unbook and optionally kill all procs that match the specified search criteria. 
+ * + * @param r + * @param killProc + * @param source + * @return */ - VirtualProc proc = null; - try { - proc = hostManager.findVirtualProc(frame); - } catch (EmptyResultDataAccessException e) { - logger.info("failed to obtain information for " + "proc running on frame: " + frame); + public int unbookProcs(ProcSearchInterface r, boolean killProc, Source source) { + List procs = hostManager.findBookedVirtualProcs(r); + for (VirtualProc proc : procs) { + unbookProc(proc, killProc, source); + } + return procs.size(); } - if (manualStopFrame(frame, FrameState.WAITING)) { - if (proc != null) { - redirectManager.addRedirect(proc, (JobInterface) proc, false, source); - kill(proc, source); - } - } else { - jobManager.updateFrameState(frame, FrameState.WAITING); + /** + * Unbook and optionally kill all procs that match the specified search criteria. + * + * @param proc + * @param killProc + * @param source + * @return + */ + public void unbookProc(VirtualProc proc, boolean killProc, Source source) { + hostManager.unbookProc(proc); + if (killProc) { + kill(proc, source); + } } /** - * If a frame is retried that was part of a dependency, that dependency should become active - * again. + * Kill procs and optionally unbook them as well. + * + * @param host + * @param source + * @param unbook */ + public void killProcs(HostInterface host, Source source, boolean unbook) { + + List procs = hostManager.findVirtualProcs(host); - // Handle FrameOnFrame depends. - for (LightweightDependency depend : dependManager.getWhatDependsOn(frame, false)) { - dependManager.unsatisfyDepend(depend); + if (unbook) { + hostManager.unbookVirtualProcs(procs); + } + + for (VirtualProc proc : procs) { + kill(proc, source); + } } - // Handle LayerOnLayer depends. - for (LightweightDependency depend : dependManager.getWhatDependsOn((LayerInterface) frame, - false)) { - dependManager.unsatisfyDepend(depend); + /** + * Kill procs and optionally unbook them as well. + * + * @param r + * @param source + * @param unbook + */ + public void killProcs(FrameSearchInterface r, Source source, boolean unbook) { + + FrameSearchCriteria newCriteria = + r.getCriteria().toBuilder().setStates(FrameStateSeq.newBuilder().build()).build(); + r.setCriteria(newCriteria); + + List procs = hostManager.findVirtualProcs(r); + + if (unbook) { + hostManager.unbookVirtualProcs(procs); + } + + for (VirtualProc proc : procs) { + kill(proc, source); + } } - // set the job back to pending. - jobManager.updateJobState(jobManager.getJob(frame.getJobId()), JobState.PENDING); + /** + * Kill procs and optionally unbook them as well. + * + * @param job + * @param source + * @param unbook + */ + public void killProcs(JobInterface job, Source source, boolean unbook) { + List procs = hostManager.findVirtualProcs(frameSearchFactory.create(job)); + if (unbook) { + hostManager.unbookVirtualProcs(procs); + } - } + for (VirtualProc proc : procs) { + kill(proc, source); + } + } - /** - * Eat frames that match the specified FrameSearch. Eaten frames are considered "Succeeded" by the - * dispatcher. A Job with all eaten frames will leave the cue. - * - * @param request - * @param source - */ - public void eatFrames(FrameSearchInterface request, Source source) { - for (FrameInterface frame : jobManager.findFrames(request)) { - eatFrame(frame, source); + /** + * Retry frames that match the specified FrameSearch request. 
+ * + * @param request + * @param source + */ + public void retryFrames(FrameSearchInterface request, Source source) { + for (FrameInterface frame : jobManager.findFrames(request)) { + try { + retryFrame(frame, source); + } catch (Exception e) { + CueExceptionUtil.logStackTrace( + "Failed to retry frame " + frame + " from source " + source, e); + } + } + } + + /** + * Retry a single frame. + * + * @param frame + * @param source + */ + public void retryFrame(FrameInterface frame, Source source) { + /** + * Have to find the proc before we stop the frame. + */ + VirtualProc proc = null; + try { + proc = hostManager.findVirtualProc(frame); + } catch (EmptyResultDataAccessException e) { + logger.info("failed to obtain information for " + "proc running on frame: " + frame); + } + + if (manualStopFrame(frame, FrameState.WAITING)) { + if (proc != null) { + redirectManager.addRedirect(proc, (JobInterface) proc, false, source); + kill(proc, source); + } + } else { + jobManager.updateFrameState(frame, FrameState.WAITING); + } + + /** + * If a frame is retried that was part of a dependency, that dependency should become active + * again. + */ + + // Handle FrameOnFrame depends. + for (LightweightDependency depend : dependManager.getWhatDependsOn(frame, false)) { + dependManager.unsatisfyDepend(depend); + } + + // Handle LayerOnLayer depends. + for (LightweightDependency depend : dependManager.getWhatDependsOn((LayerInterface) frame, + false)) { + dependManager.unsatisfyDepend(depend); + } + + // set the job back to pending. + jobManager.updateJobState(jobManager.getJob(frame.getJobId()), JobState.PENDING); + + } + + /** + * Eat frames that match the specified FrameSearch. Eaten frames are considered "Succeeded" by + * the dispatcher. A Job with all eaten frames will leave the cue. + * + * @param request + * @param source + */ + public void eatFrames(FrameSearchInterface request, Source source) { + for (FrameInterface frame : jobManager.findFrames(request)) { + eatFrame(frame, source); + } + } + + /** + * Eat the specified frame. Eaten frames are considered "Succeeded" by the dispatcher. A Job + * with all eaten frames will leave the cue. + * + * @param frame + * @param source + */ + public void eatFrame(FrameInterface frame, Source source) { + /** + * Have to find the proc before we stop the frame. + */ + VirtualProc proc = null; + try { + proc = hostManager.findVirtualProc(frame); + } catch (EmptyResultDataAccessException e) { + logger.info("failed to obtain information " + "for proc running on frame: " + frame); + } + + if (manualStopFrame(frame, FrameState.EATEN)) { + if (proc != null) { + kill(proc, source); + } + } else { + jobManager.updateFrameState(frame, FrameState.EATEN); + } + if (jobManager.isJobComplete(frame)) { + queueShutdownJob(frame, source, false); + } + } + + /** + * Marks the result of the specified frame search as FrameState.Waiting and decrease the depend + * count to 0 no matter how many active depends exists. + * + * @param request + * @param source + */ + public void markFramesAsWaiting(FrameSearchInterface request, Source source) { + for (FrameInterface frame : jobManager.findFrames(request)) { + jobManager.markFrameAsWaiting(frame); + } } - } - /** - * Eat the specified frame. Eaten frames are considered "Succeeded" by the dispatcher. A Job with - * all eaten frames will leave the cue. - * - * @param frame - * @param source - */ - public void eatFrame(FrameInterface frame, Source source) { /** - * Have to find the proc before we stop the frame. 
+ * Stops the specified frame. Return true if the call to this method actually stops the frame, + * ie the state changes from Running to the given state. Return false if the frame was already + * stopped. + * + * Stopping the frame also removes the link between the frame and the proc. The proc still + * exists, but, its assigned frame is null. + * + * @param frame + * @param state */ - VirtualProc proc = null; - try { - proc = hostManager.findVirtualProc(frame); - } catch (EmptyResultDataAccessException e) { - logger.info("failed to obtain information " + "for proc running on frame: " + frame); - } - - if (manualStopFrame(frame, FrameState.EATEN)) { - if (proc != null) { - kill(proc, source); - } - } else { - jobManager.updateFrameState(frame, FrameState.EATEN); - } - if (jobManager.isJobComplete(frame)) { - queueShutdownJob(frame, source, false); - } - } - - /** - * Marks the result of the specified frame search as FrameState.Waiting and decrease the depend - * count to 0 no matter how many active depends exists. - * - * @param request - * @param source - */ - public void markFramesAsWaiting(FrameSearchInterface request, Source source) { - for (FrameInterface frame : jobManager.findFrames(request)) { - jobManager.markFrameAsWaiting(frame); - } - } - - /** - * Stops the specified frame. Return true if the call to this method actually stops the frame, ie - * the state changes from Running to the given state. Return false if the frame was already - * stopped. - * - * Stopping the frame also removes the link between the frame and the proc. The proc still exists, - * but, its assigned frame is null. - * - * @param frame - * @param state - */ - private boolean manualStopFrame(FrameInterface frame, FrameState state) { - if (dispatchSupport.stopFrame(frame, state, state.ordinal() + 500)) { - dispatchSupport.updateUsageCounters(frame, state.ordinal() + 500); - logger.info("Manually stopping frame: " + frame); - return true; - } - return false; - } - - public DependManager getDependManager() { - return dependManager; - } - - public void setDependManager(DependManager dependManager) { - this.dependManager = dependManager; - } - - public JobManager getJobManager() { - return jobManager; - } - - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } - - public DispatchQueue getManageQueue() { - return manageQueue; - } - - public void setManageQueue(DispatchQueue manageQueue) { - this.manageQueue = manageQueue; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } - - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } - - public RqdClient getRqdClient() { - return rqdClient; - } - - public void setRqdClient(RqdClient rqdClient) { - this.rqdClient = rqdClient; - } - - public DepartmentManager getDepartmentManager() { - return departmentManager; - } - - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } - - public RedirectManager getRedirectManager() { - return redirectManager; - } - - public void setRedirectManager(RedirectManager redirectManager) { - this.redirectManager = redirectManager; - } - - public EmailSupport getEmailSupport() { - return emailSupport; - } - - public void setEmailSupport(EmailSupport emailSupport) { - this.emailSupport = 
emailSupport; - } - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } + private boolean manualStopFrame(FrameInterface frame, FrameState state) { + if (dispatchSupport.stopFrame(frame, state, state.ordinal() + 500)) { + dispatchSupport.updateUsageCounters(frame, state.ordinal() + 500); + logger.info("Manually stopping frame: " + frame); + return true; + } + return false; + } + + public DependManager getDependManager() { + return dependManager; + } + + public void setDependManager(DependManager dependManager) { + this.dependManager = dependManager; + } + + public JobManager getJobManager() { + return jobManager; + } + + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } + + public DispatchQueue getManageQueue() { + return manageQueue; + } + + public void setManageQueue(DispatchQueue manageQueue) { + this.manageQueue = manageQueue; + } + + public HostManager getHostManager() { + return hostManager; + } + + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; + } + + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } + + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } + + public RqdClient getRqdClient() { + return rqdClient; + } + + public void setRqdClient(RqdClient rqdClient) { + this.rqdClient = rqdClient; + } + + public DepartmentManager getDepartmentManager() { + return departmentManager; + } + + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } + + public RedirectManager getRedirectManager() { + return redirectManager; + } + + public void setRedirectManager(RedirectManager redirectManager) { + this.redirectManager = redirectManager; + } + + public EmailSupport getEmailSupport() { + return emailSupport; + } + + public void setEmailSupport(EmailSupport emailSupport) { + this.emailSupport = emailSupport; + } + + public FrameSearchFactory getFrameSearchFactory() { + return frameSearchFactory; + } + + public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { + this.frameSearchFactory = frameSearchFactory; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java index e1236f7e7..4b3245665 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java @@ -56,936 +56,945 @@ import com.imageworks.spcue.util.CueUtil; public class JobSpec { - @Autowired - private Environment env; + @Autowired + private Environment env; - private static final Logger logger = LogManager.getLogger(JobSpec.class); + private static final Logger logger = LogManager.getLogger(JobSpec.class); - private String facility; + private String facility; - private String show; + private String show; - private String shot; + private String shot; - private String user; + private String user; - private String email; + private String email; - private Optional uid; + private Optional uid; - private int totalFrames = 0; + private int totalFrames = 0; - private Document doc; + private Document doc; - private ServiceManager serviceManager; + private ServiceManager serviceManager; - /** - * Maximum number of cores a layer can get per frame. 
- */
- public static final int MAX_CORES = 800;
+ /**
+ * Maximum number of cores a layer can get per frame.
+ */
+ public static final int MAX_CORES = 800;
- /**
- * The maximum number of layers a job can have. Increases this with care, its usually not worth
- * it. The more layers you have the longer a job takes to dispatch which could lead to dispatches
- * being dropped.
- */
- public static final int MAX_LAYERS = 1000;
+ /**
+ * The maximum number of layers a job can have. Increase this with care; it's usually not worth
+ * it. The more layers you have, the longer a job takes to dispatch, which could lead to
+ * dispatches being dropped.
+ */
+ public static final int MAX_LAYERS = 1000;
- /**
- * The maximum number of frames a job can have. Increase this with care. The more frames a job
- * has, the longer it takes to dispatch, which could lead to dispatches being dropped.
- */
- public static final int MAX_FRAMES = 100000;
+ /**
+ * The maximum number of frames a job can have. Increase this with care. The more frames a job
+ * has, the longer it takes to dispatch, which could lead to dispatches being dropped.
+ */
+ public static final int MAX_FRAMES = 100000;
- // The default number of retries per frame
- public static final int FRAME_RETRIES_DEFAULT = 1;
+ // The default number of retries per frame
+ public static final int FRAME_RETRIES_DEFAULT = 1;
- // The default maximum number of retries per frame.
- public static final int FRAME_RETRIES_MAX = 1;
+ // The default maximum number of retries per frame.
+ public static final int FRAME_RETRIES_MAX = 1;
- // The default minimum number of retries per frame.
- public static final int FRAME_RETRIES_MIN = 0;
+ // The default minimum number of retries per frame.
+ public static final int FRAME_RETRIES_MIN = 0;
- public static final String DEFAULT_SERVICE = "default";
+ public static final String DEFAULT_SERVICE = "default";
- public static final String SPCUE_DTD_URL = "http://localhost:8080/spcue/dtd/";
+ public static final String SPCUE_DTD_URL = "http://localhost:8080/spcue/dtd/";
- private List<BuildableJob> jobs = new ArrayList<BuildableJob>();
+ private List<BuildableJob> jobs = new ArrayList<BuildableJob>();
- private List<BuildableDependency> depends = new ArrayList<BuildableDependency>();
+ private List<BuildableDependency> depends = new ArrayList<BuildableDependency>();
- public JobSpec() {}
+ public JobSpec() {}
- public static final String NAME_REGEX = "^([\\w\\.-]{3,})$";
+ public static final String NAME_REGEX = "^([\\w\\.-]{3,})$";
- public static final Pattern NAME_PATTERN = Pattern.compile(NAME_REGEX);
+ public static final Pattern NAME_PATTERN = Pattern.compile(NAME_REGEX);
- public String conformJobName(String name) {
+ public String conformJobName(String name) {
- if (name == null) {
- throw new SpecBuilderException("Job names cannot be null");
- }
+ if (name == null) {
+ throw new SpecBuilderException("Job names cannot be null");
+ }
- String prefix = String.format("%s-%s-%s_", show, shot, user);
- String suffix = name;
+ String prefix = String.format("%s-%s-%s_", show, shot, user);
+ String suffix = name;
- /*
- * Find the job's suffix
- */
- if (suffix.startsWith(prefix)) {
- int index = prefix.length() - 1;
- suffix = suffix.substring(index);
- }
+ /*
+ * Find the job's suffix
+ */
+ if (suffix.startsWith(prefix)) {
+ int index = prefix.length() - 1;
+ suffix = suffix.substring(index);
+ }
+
+ suffix = suffix.toLowerCase();
+ suffix = suffix.replaceAll("[_]{2,}", "_");
- suffix = suffix.toLowerCase();
- suffix = suffix.replaceAll("[_]{2,}", "_");
+ suffix = suffix.replace("-", "_");
- suffix = suffix.replace("-", "_");
+ Matcher matcher =
NAME_PATTERN.matcher(suffix); + if (!matcher.matches()) { + throw new SpecBuilderException("The job name suffix: " + suffix + + " must be composed of alpha numeric characters, periods, " + + "and underscores and be at least 3 characters long"); + } + + suffix = suffix.replaceAll("^[_]{1,}", ""); + prefix = prefix.replaceAll("[_]{1,}$", ""); - Matcher matcher = NAME_PATTERN.matcher(suffix); - if (!matcher.matches()) { - throw new SpecBuilderException("The job name suffix: " + suffix - + " must be composed of alpha numeric characters, periods, " - + "and underscores and be at least 3 characters long"); + return String.format("%s_%s", prefix, suffix).toLowerCase(); } - suffix = suffix.replaceAll("^[_]{1,}", ""); - prefix = prefix.replaceAll("[_]{1,}$", ""); + public static String conformName(String type, String name) { - return String.format("%s_%s", prefix, suffix).toLowerCase(); - } + String lowerType = type.toLowerCase(); - public static String conformName(String type, String name) { + if (name.length() < 3) { + throw new SpecBuilderException( + "The " + lowerType + " name must be at least 3 characters."); + } - String lowerType = type.toLowerCase(); + String newName = name; + newName = newName.replace("-", "_"); + newName = newName.toLowerCase(); - if (name.length() < 3) { - throw new SpecBuilderException("The " + lowerType + " name must be at least 3 characters."); - } + Matcher matcher = NAME_PATTERN.matcher(newName); + if (!matcher.matches()) { + throw new SpecBuilderException("The " + lowerType + " name: " + newName + + " is not in the proper format. " + type + " names must be " + + "alpha numeric, no dashes or punctuation."); + } - String newName = name; - newName = newName.replace("-", "_"); - newName = newName.toLowerCase(); + return newName; + } - Matcher matcher = NAME_PATTERN.matcher(newName); - if (!matcher.matches()) { - throw new SpecBuilderException( - "The " + lowerType + " name: " + newName + " is not in the proper format. " + type - + " names must be " + "alpha numeric, no dashes or punctuation."); + public static String conformShowName(String name) { + return conformName("Show", name); } - return newName; - } + public static String conformShotName(String name) { + return conformName("Shot", name); + } - public static String conformShowName(String name) { - return conformName("Show", name); - } + public static String conformLayerName(String name) { + return conformName("Layer", name); + } - public static String conformShotName(String name) { - return conformName("Shot", name); - } + public static final String FRAME_NAME_REGEX = "^([\\d]{4,6})-([\\w]+)$"; - public static String conformLayerName(String name) { - return conformName("Layer", name); - } + public static final Pattern FRAME_NAME_PATTERN = Pattern.compile(FRAME_NAME_REGEX); - public static final String FRAME_NAME_REGEX = "^([\\d]{4,6})-([\\w]+)$"; + public String conformFrameName(String name) { + Matcher m = FRAME_NAME_PATTERN.matcher(name); + if (!m.matches()) { + throw new SpecBuilderException( + "The frame name: " + name + " is not in the proper format."); + } + return String.format("%04d-%s", Integer.valueOf(m.group(1)), conformLayerName(m.group(2))); + } - public static final Pattern FRAME_NAME_PATTERN = Pattern.compile(FRAME_NAME_REGEX); + /** + * Grabs the show/shot/user/uid for this spec. 
+ */ + private void handleSpecTag() { + Element rootElement = doc.getRootElement(); + facility = rootElement.getChildTextTrim("facility"); + if (facility != null) { + facility = facility.toLowerCase(); + } - public String conformFrameName(String name) { - Matcher m = FRAME_NAME_PATTERN.matcher(name); - if (!m.matches()) { - throw new SpecBuilderException("The frame name: " + name + " is not in the proper format."); - } - return String.format("%04d-%s", Integer.valueOf(m.group(1)), conformLayerName(m.group(2))); - } + show = rootElement.getChildTextTrim("show"); + shot = conformShotName(rootElement.getChildTextTrim("shot")); + user = rootElement.getChildTextTrim("user"); + uid = Optional.ofNullable(rootElement.getChildTextTrim("uid")).map(Integer::parseInt); + email = rootElement.getChildTextTrim("email"); - /** - * Grabs the show/shot/user/uid for this spec. - */ - private void handleSpecTag() { - Element rootElement = doc.getRootElement(); - facility = rootElement.getChildTextTrim("facility"); - if (facility != null) { - facility = facility.toLowerCase(); + if (user.equals("root") || uid.equals(Optional.of(0))) { + throw new SpecBuilderException("Cannot launch jobs as root."); + } } - show = rootElement.getChildTextTrim("show"); - shot = conformShotName(rootElement.getChildTextTrim("shot")); - user = rootElement.getChildTextTrim("user"); - uid = Optional.ofNullable(rootElement.getChildTextTrim("uid")).map(Integer::parseInt); - email = rootElement.getChildTextTrim("email"); + /** + * Loop over all tags + * + */ + private void handleJobsTag() { + List elements = doc.getRootElement().getChildren("job"); + if (elements == null) { + return; + } - if (user.equals("root") || uid.equals(Optional.of(0))) { - throw new SpecBuilderException("Cannot launch jobs as root."); + for (Object tmpElement : elements) { + Element jobElement = (Element) tmpElement; + jobs.add(handleJobTag(jobElement)); + } } - } - /** - * Loop over all tags - * - */ - private void handleJobsTag() { - List elements = doc.getRootElement().getChildren("job"); - if (elements == null) { - return; + /** + * Loop over all tags + * + */ + private void handleDependsTags() { + Element delements = doc.getRootElement().getChild("depends"); + if (delements == null) { + return; + } + List elements = delements.getChildren("depend"); + if (elements == null) { + return; + } + for (Object tmpElement : elements) { + Element dependElement = (Element) tmpElement; + depends.add(handleDependTag(dependElement)); + } } - for (Object tmpElement : elements) { - Element jobElement = (Element) tmpElement; - jobs.add(handleJobTag(jobElement)); - } - } + /** + * + * @param jobTag + * @return + */ + private BuildableJob handleJobTag(Element jobTag) { + + /* + * Read in the job tag + */ + JobDetail job = new JobDetail(); + job.name = conformJobName(jobTag.getAttributeValue("name")); + job.state = JobState.STARTUP; + job.isPaused = Convert.stringToBool(jobTag.getChildTextTrim("paused")); + job.isAutoEat = Convert.stringToBool(jobTag.getChildTextTrim("autoeat")); + job.isLocal = false; + Element local = jobTag.getChild("localbook"); + if (local != null) { + job.isLocal = true; + job.localHostName = local.getAttributeValue("host"); + if (local.getAttributeValue("cores") != null) + job.localMaxCores = Integer.parseInt(local.getAttributeValue("cores")); + if (local.getAttributeValue("memory") != null) + job.localMaxMemory = Long.parseLong(local.getAttributeValue("memory")); + if (local.getAttributeValue("threads") != null) + job.localThreadNumber = 
Integer.parseInt(local.getAttributeValue("threads")); + if (local.getAttributeValue("gpus") != null) + job.localMaxGpus = Integer.parseInt(local.getAttributeValue("gpus")); + if (local.getAttributeValue("gpu") != null) { + logger.warn(job.name + " localbook has the deprecated gpu. Use gpu_memory."); + job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu")); + } + if (local.getAttributeValue("gpu_memory") != null) + job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu_memory")); + } - /** - * Loop over all tags - * - */ - private void handleDependsTags() { - Element delements = doc.getRootElement().getChild("depends"); - if (delements == null) { - return; - } - List elements = delements.getChildren("depend"); - if (elements == null) { - return; - } - for (Object tmpElement : elements) { - Element dependElement = (Element) tmpElement; - depends.add(handleDependTag(dependElement)); - } - } + job.maxCoreUnits = 20000; + job.minCoreUnits = 100; + job.startTime = CueUtil.getTime(); + job.maxRetries = FRAME_RETRIES_DEFAULT; + job.shot = shot; + job.user = user; + job.uid = uid; + job.email = email; + job.os = null; // default to no OS specified + job.showName = show; + job.facilityName = facility; + job.deptName = jobTag.getChildTextTrim("dept"); + + BuildableJob buildableJob = new BuildableJob(job); + + if (jobTag.getChildTextTrim("os") != null) { + job.os = jobTag.getChildTextTrim("os"); + } - /** - * - * @param jobTag - * @return - */ - private BuildableJob handleJobTag(Element jobTag) { + if (jobTag.getChildTextTrim("maxretries") != null) { + job.maxRetries = Integer.valueOf(jobTag.getChildTextTrim("maxretries")); + if (job.maxRetries > FRAME_RETRIES_MAX) { + job.maxRetries = FRAME_RETRIES_MAX; + } else if (job.maxRetries < FRAME_RETRIES_MIN) { + job.maxRetries = FRAME_RETRIES_MIN; + } + } - /* - * Read in the job tag - */ - JobDetail job = new JobDetail(); - job.name = conformJobName(jobTag.getAttributeValue("name")); - job.state = JobState.STARTUP; - job.isPaused = Convert.stringToBool(jobTag.getChildTextTrim("paused")); - job.isAutoEat = Convert.stringToBool(jobTag.getChildTextTrim("autoeat")); - job.isLocal = false; - Element local = jobTag.getChild("localbook"); - if (local != null) { - job.isLocal = true; - job.localHostName = local.getAttributeValue("host"); - if (local.getAttributeValue("cores") != null) - job.localMaxCores = Integer.parseInt(local.getAttributeValue("cores")); - if (local.getAttributeValue("memory") != null) - job.localMaxMemory = Long.parseLong(local.getAttributeValue("memory")); - if (local.getAttributeValue("threads") != null) - job.localThreadNumber = Integer.parseInt(local.getAttributeValue("threads")); - if (local.getAttributeValue("gpus") != null) - job.localMaxGpus = Integer.parseInt(local.getAttributeValue("gpus")); - if (local.getAttributeValue("gpu") != null) { - logger.warn(job.name + " localbook has the deprecated gpu. 
Use gpu_memory."); - job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu")); - } - if (local.getAttributeValue("gpu_memory") != null) - job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu_memory")); - } - - job.maxCoreUnits = 20000; - job.minCoreUnits = 100; - job.startTime = CueUtil.getTime(); - job.maxRetries = FRAME_RETRIES_DEFAULT; - job.shot = shot; - job.user = user; - job.uid = uid; - job.email = email; - job.os = null; // default to no OS specified - job.showName = show; - job.facilityName = facility; - job.deptName = jobTag.getChildTextTrim("dept"); - - BuildableJob buildableJob = new BuildableJob(job); - - if (jobTag.getChildTextTrim("os") != null) { - job.os = jobTag.getChildTextTrim("os"); - } - - if (jobTag.getChildTextTrim("maxretries") != null) { - job.maxRetries = Integer.valueOf(jobTag.getChildTextTrim("maxretries")); - if (job.maxRetries > FRAME_RETRIES_MAX) { - job.maxRetries = FRAME_RETRIES_MAX; - } else if (job.maxRetries < FRAME_RETRIES_MIN) { - job.maxRetries = FRAME_RETRIES_MIN; - } - } - - if (jobTag.getChildTextTrim("maxcores") != null) { - buildableJob.maxCoresOverride = Integer.valueOf(jobTag.getChildTextTrim("maxcores")); - } - if (jobTag.getChildTextTrim("maxgpus") != null) { - buildableJob.maxGpusOverride = Integer.valueOf(jobTag.getChildTextTrim("maxgpus")); - } - - if (jobTag.getChildTextTrim("priority") != null) { - job.priority = Integer.valueOf(jobTag.getChildTextTrim("priority")); - } - - Element envTag = jobTag.getChild("env"); - if (envTag != null) { - handleEnvironmentTag(envTag, buildableJob.env); - } - - handleLayerTags(buildableJob, jobTag); - - if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { - throw new SpecBuilderException("The job " + job.name + " has over " + MAX_LAYERS + " layers"); - } - - if (buildableJob.getBuildableLayers().size() < 1) { - throw new SpecBuilderException("The job " + job.name + " has no layers"); - } - - return buildableJob; - } - - /** - * - * @param buildableJob - * @param jobTag - */ - private void handleLayerTags(BuildableJob buildableJob, Element jobTag) { - - Set layerNames = new HashSet(); - int dispatchOrder = 0; - - for (Object layerTmp : jobTag.getChild("layers").getChildren("layer")) { - - Element layerTag = (Element) layerTmp; - - /* - * Setup a LayerDetail and Buildable layer, add layer to job - */ - LayerDetail layer = new LayerDetail(); - BuildableLayer buildableLayer = new BuildableLayer(layer); - - /* - * Setup the layer type - */ - String layerType = layerTag.getAttributeValue("type"); - /* - * The Enum is capitalized so make sure that we capitalize the string we received from the - * user. - */ - layer.type = LayerType.valueOf(layerType.toUpperCase()); - if (layer.type == null) { - throw new SpecBuilderException("error, the layer " + layer.name - + " was defined with an invalid type: " + layerTag.getAttributeValue("type")); - } - - /* - * If the layer is a post layer, we add it to the post job. - */ - if (layer.type.equals(LayerType.POST)) { - if (buildableJob.getPostJob() == null) { - buildableJob.setPostJob(initPostJob(buildableJob)); - } - buildableJob.getPostJob().addBuildableLayer(buildableLayer); - } else { - buildableJob.addBuildableLayer(buildableLayer); - } - - /* - * Check to make sure the name is unique for this job. 
- */ - if (layerTag.getAttributeValue("name") == null) { - throw new SpecBuilderException("error, the layer name cannot be null"); - } - - layer.name = conformLayerName(layerTag.getAttributeValue("name")); - - if (layerNames.contains(layer.name)) { - throw new SpecBuilderException("error, the layer " + layer.name - + " was already defined in job " + buildableJob.detail.name); - } - layerNames.add(layer.name); - - /* - * Setup the simple layer properties. - */ - layer.command = layerTag.getChildTextTrim("cmd"); - layer.range = layerTag.getChildTextTrim("range"); - layer.dispatchOrder = ++dispatchOrder; - - /* - * Determine some of the more complex attributes. - */ - determineResourceDefaults(layerTag, buildableJob, layer); - determineChunkSize(layerTag, layer); - determineMinimumCores(layerTag, layer); - determineMinimumGpus(layerTag, layer); - determineThreadable(layerTag, layer); - determineTags(buildableJob, layer, layerTag); - determineMinimumMemory(buildableJob, layerTag, layer, buildableLayer); - determineMinimumGpuMemory(buildableJob, layerTag, layer); - determineOutputs(layerTag, buildableJob, layer); - - // set a timeout value on the layer - if (layerTag.getChildTextTrim("timeout") != null) { - layer.timeout = Integer.parseInt(layerTag.getChildTextTrim("timeout")); - } - - if (layerTag.getChildTextTrim("timeout_llu") != null) { - layer.timeout_llu = Integer.parseInt(layerTag.getChildTextTrim("timeout_llu")); - } - - /* - * Handle the layer environment - */ - Element envTag = layerTag.getChild("env"); - if (envTag != null) { - handleEnvironmentTag(envTag, buildableLayer.env); - } - - totalFrames = totalFrames + getFrameRangeSize(layer.range, layer.chunkSize); - - if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { - throw new SpecBuilderException( - "error, your job has " + buildableJob.getBuildableLayers().size() + " layers, " - + " the maximum number of allowed layers is " + MAX_LAYERS); - } - - if (totalFrames > MAX_FRAMES) { - throw new SpecBuilderException("error, your job has " + totalFrames - + " frames, the maximum number of allowed " + "frames is " + MAX_FRAMES); - } - } - } - - /** - * Convert string given for memory, with m for megabytes or g for gigabytes to kilobytes. - * - * @param input - */ - private long convertMemoryInput(String input) { - if (input.contains("m")) { - double megs = Double.valueOf(input.substring(0, input.lastIndexOf("m"))); - return (long) (megs * 1024); - } else if (input.contains("g")) { - return Long.valueOf(input.substring(0, input.lastIndexOf("g"))) * CueUtil.GB; - } else { - return Long.valueOf(input) * CueUtil.GB; - } - } - - private void determineMinimumMemory(BuildableJob buildableJob, Element layerTag, - LayerDetail layer, BuildableLayer buildableLayer) { - - if (layerTag.getChildTextTrim("memory") == null) { - return; - } - - long minMemory; - String memory = layerTag.getChildTextTrim("memory").toLowerCase(); - - minMemory = convertMemoryInput(memory); - long memReservedMin = env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); - long memReservedMax = env.getRequiredProperty("dispatcher.memory.mem_reserved_max", Long.class); - - // Some quick sanity checks to make sure memory hasn't gone - // over or under reasonable defaults. 
- if (minMemory > memReservedMax) { - logger.warn("Setting memory for " + buildableJob.detail.name + "/" + layer.name + " to: " - + memReservedMax); - layer.minimumMemory = memReservedMax; - } else if (minMemory < memReservedMin) { - logger.warn(buildableJob.detail.name + "/" + layer.name - + "Specified too little memory, defaulting to: " + memReservedMin); - minMemory = memReservedMin; - } - - buildableLayer.isMemoryOverride = true; - layer.minimumMemory = minMemory; - - } - - /** - * If the gpu_memory option is set, set minimumGpuMemory to that supplied value - * - * @param layerTag - * @param layer - */ - private void determineMinimumGpuMemory(BuildableJob buildableJob, Element layerTag, - LayerDetail layer) { - - String gpu = layerTag.getChildTextTrim("gpu"); - String gpuMemory = layerTag.getChildTextTrim("gpu_memory"); - if (gpu == null && gpuMemory == null) { - return; - } - - String memory = null; - if (gpu != null) { - logger.warn( - buildableJob.detail.name + "/" + layer.name + " has the deprecated gpu. Use gpu_memory."); - memory = gpu.toLowerCase(); - } - if (gpuMemory != null) - memory = gpuMemory.toLowerCase(); - - long minGpuMemory; - try { - minGpuMemory = convertMemoryInput(memory); - long memGpuReservedMin = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); - long memGpuReservedMax = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_max", Long.class); - - // Some quick sanity checks to make sure gpu memory hasn't gone - // over or under reasonable defaults. - if (minGpuMemory > memGpuReservedMax) { - throw new SpecBuilderException( - "Gpu memory requirements exceed " + "maximum. Are you specifying the correct units?"); - } else if (minGpuMemory < memGpuReservedMin) { - logger.warn(buildableJob.detail.name + "/" + layer.name - + "Specified too little gpu memory, defaulting to: " + memGpuReservedMin); - minGpuMemory = memGpuReservedMin; - } - - layer.minimumGpuMemory = minGpuMemory; - - } catch (Exception e) { - logger.info("Error setting gpu memory for " + buildableJob.detail.name + "/" + layer.name - + " failed, reason: " + e + ". Using default."); - layer.minimumGpuMemory = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); - } - } - - /** - * Cores may be specified as a decimal or core points. - * - * If no core value is specified, we default to the value of - * Dispatcher.CORE_POINTS_RESERVED_DEFAULT - * - * If the value is specified but is less than the minimum allowed, then the value is reset to the - * default. - * - * If the value is specified but is greater than the max allowed, then the value is reset to the - * default. - * - */ - private void determineMinimumCores(Element layerTag, LayerDetail layer) { - - String cores = layerTag.getChildTextTrim("cores"); - if (cores == null) { - return; - } - - int corePoints = layer.minimumCores; - - if (cores.contains(".")) { - if (cores.contains("-")) { - corePoints = (int) (Double.valueOf(cores) * 100 - .5); - } else { - corePoints = (int) (Double.valueOf(cores) * 100 + .5); - } - } else { - corePoints = Integer.valueOf(cores); - } - - if (corePoints > 0 && corePoints < Dispatcher.CORE_POINTS_RESERVED_MIN) { - corePoints = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; - } else if (corePoints > Dispatcher.CORE_POINTS_RESERVED_MAX) { - corePoints = Dispatcher.CORE_POINTS_RESERVED_MAX; - } - - layer.minimumCores = corePoints; - } - - /** - * Gpu is a int. 
- * - * If no gpu value is specified, we default to the value of Dispatcher.GPU_RESERVED_DEFAULT - */ - private void determineMinimumGpus(Element layerTag, LayerDetail layer) { - - String gpus = layerTag.getChildTextTrim("gpus"); - if (gpus != null) { - layer.minimumGpus = Integer.valueOf(gpus); - } - } - - private void determineChunkSize(Element layerTag, LayerDetail layer) { - layer.chunkSize = Integer.parseInt(layerTag.getChildTextTrim("chunk")); - } - - /** - * Determine if the layer is threadable. A manually set threadable option in the job spec should - * override the service defaults. - * - * @param layerTag - * @param layer - */ - private void determineThreadable(Element layerTag, LayerDetail layer) { - // Must have at least 1 core to thread. - if (layer.minimumCores > 0 && layer.minimumCores < 100) { - layer.isThreadable = false; - } else if (layerTag.getChildTextTrim("threadable") != null) { - layer.isThreadable = Convert.stringToBool(layerTag.getChildTextTrim("threadable")); - } - } - - private void determineResourceDefaults(Element layerTag, BuildableJob job, LayerDetail layer) { - - Element t_services = layerTag.getChild("services"); - List services = new ArrayList(); - - /* - * Build a list of services from the XML. Filter out duplicates and empty services. - */ - if (t_services != null) { + if (jobTag.getChildTextTrim("maxcores") != null) { + buildableJob.maxCoresOverride = Integer.valueOf(jobTag.getChildTextTrim("maxcores")); + } + if (jobTag.getChildTextTrim("maxgpus") != null) { + buildableJob.maxGpusOverride = Integer.valueOf(jobTag.getChildTextTrim("maxgpus")); + } + + if (jobTag.getChildTextTrim("priority") != null) { + job.priority = Integer.valueOf(jobTag.getChildTextTrim("priority")); + } + + Element envTag = jobTag.getChild("env"); + if (envTag != null) { + handleEnvironmentTag(envTag, buildableJob.env); + } - for (Object tmp : t_services.getChildren()) { - Element t_service = (Element) tmp; - String service_name = t_service.getTextTrim(); + handleLayerTags(buildableJob, jobTag); - if (service_name.length() == 0) { - continue; + if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { + throw new SpecBuilderException( + "The job " + job.name + " has over " + MAX_LAYERS + " layers"); } - if (services.contains(service_name)) { - continue; + if (buildableJob.getBuildableLayers().size() < 1) { + throw new SpecBuilderException("The job " + job.name + " has no layers"); } - services.add(service_name); - } + + return buildableJob; } - /* - * Start from the beginning and check each service. The first one that has a service record will - * be the one to use. - */ - ServiceEntity primaryService = null; - for (String service_name : services) { - try { - primaryService = serviceManager.getService(service_name, job.detail.showName); - // Once a service is found, break; - break; - } catch (EmptyResultDataAccessException e) { - logger.warn("warning, service not found for layer " + layer.getName() + " " + service_name); - } - } - - /* - * If no primary service was found, use the default service. 
+ /** + * + * @param buildableJob + * @param jobTag */ - if (primaryService == null) { - primaryService = serviceManager.getService(DEFAULT_SERVICE); - services.add(primaryService.name); + private void handleLayerTags(BuildableJob buildableJob, Element jobTag) { + + Set layerNames = new HashSet(); + int dispatchOrder = 0; + + for (Object layerTmp : jobTag.getChild("layers").getChildren("layer")) { + + Element layerTag = (Element) layerTmp; + + /* + * Setup a LayerDetail and Buildable layer, add layer to job + */ + LayerDetail layer = new LayerDetail(); + BuildableLayer buildableLayer = new BuildableLayer(layer); + + /* + * Setup the layer type + */ + String layerType = layerTag.getAttributeValue("type"); + /* + * The Enum is capitalized so make sure that we capitalize the string we received from + * the user. + */ + layer.type = LayerType.valueOf(layerType.toUpperCase()); + if (layer.type == null) { + throw new SpecBuilderException( + "error, the layer " + layer.name + " was defined with an invalid type: " + + layerTag.getAttributeValue("type")); + } + + /* + * If the layer is a post layer, we add it to the post job. + */ + if (layer.type.equals(LayerType.POST)) { + if (buildableJob.getPostJob() == null) { + buildableJob.setPostJob(initPostJob(buildableJob)); + } + buildableJob.getPostJob().addBuildableLayer(buildableLayer); + } else { + buildableJob.addBuildableLayer(buildableLayer); + } + + /* + * Check to make sure the name is unique for this job. + */ + if (layerTag.getAttributeValue("name") == null) { + throw new SpecBuilderException("error, the layer name cannot be null"); + } + + layer.name = conformLayerName(layerTag.getAttributeValue("name")); + + if (layerNames.contains(layer.name)) { + throw new SpecBuilderException("error, the layer " + layer.name + + " was already defined in job " + buildableJob.detail.name); + } + layerNames.add(layer.name); + + /* + * Setup the simple layer properties. + */ + layer.command = layerTag.getChildTextTrim("cmd"); + layer.range = layerTag.getChildTextTrim("range"); + layer.dispatchOrder = ++dispatchOrder; + + /* + * Determine some of the more complex attributes. 
+ */ + determineResourceDefaults(layerTag, buildableJob, layer); + determineChunkSize(layerTag, layer); + determineMinimumCores(layerTag, layer); + determineMinimumGpus(layerTag, layer); + determineThreadable(layerTag, layer); + determineTags(buildableJob, layer, layerTag); + determineMinimumMemory(buildableJob, layerTag, layer, buildableLayer); + determineMinimumGpuMemory(buildableJob, layerTag, layer); + determineOutputs(layerTag, buildableJob, layer); + + // set a timeout value on the layer + if (layerTag.getChildTextTrim("timeout") != null) { + layer.timeout = Integer.parseInt(layerTag.getChildTextTrim("timeout")); + } + + if (layerTag.getChildTextTrim("timeout_llu") != null) { + layer.timeout_llu = Integer.parseInt(layerTag.getChildTextTrim("timeout_llu")); + } + + /* + * Handle the layer environment + */ + Element envTag = layerTag.getChild("env"); + if (envTag != null) { + handleEnvironmentTag(envTag, buildableLayer.env); + } + + totalFrames = totalFrames + getFrameRangeSize(layer.range, layer.chunkSize); + + if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { + throw new SpecBuilderException("error, your job has " + + buildableJob.getBuildableLayers().size() + " layers, " + + " the maximum number of allowed layers is " + MAX_LAYERS); + } + + if (totalFrames > MAX_FRAMES) { + throw new SpecBuilderException("error, your job has " + totalFrames + + " frames, the maximum number of allowed " + "frames is " + MAX_FRAMES); + } + } } - Element t_limits = layerTag.getChild("limits"); - List limits = new ArrayList(); + /** + * Convert string given for memory, with m for megabytes or g for gigabytes to kilobytes. + * + * @param input + */ + private long convertMemoryInput(String input) { + if (input.contains("m")) { + double megs = Double.valueOf(input.substring(0, input.lastIndexOf("m"))); + return (long) (megs * 1024); + } else if (input.contains("g")) { + return Long.valueOf(input.substring(0, input.lastIndexOf("g"))) * CueUtil.GB; + } else { + return Long.valueOf(input) * CueUtil.GB; + } + } - if (t_limits != null) { - for (Object tmp : t_limits.getChildren()) { - Element t_limit = (Element) tmp; - String limitName = t_limit.getTextTrim(); + private void determineMinimumMemory(BuildableJob buildableJob, Element layerTag, + LayerDetail layer, BuildableLayer buildableLayer) { - if (limitName.length() == 0) { - continue; + if (layerTag.getChildTextTrim("memory") == null) { + return; } - if (limits.contains(limitName)) { - continue; + long minMemory; + String memory = layerTag.getChildTextTrim("memory").toLowerCase(); + + minMemory = convertMemoryInput(memory); + long memReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_reserved_min", Long.class); + long memReservedMax = + env.getRequiredProperty("dispatcher.memory.mem_reserved_max", Long.class); + + // Some quick sanity checks to make sure memory hasn't gone + // over or under reasonable defaults. 
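convertMemoryInput above turns a memory string carrying an m (megabytes) or g (gigabytes) suffix into kilobytes. The hypothetical standalone version below assumes, as the megabyte branch above suggests, that CueUtil.GB is one gigabyte expressed in kilobytes; the class and constant names are illustrative only.

    // Hypothetical standalone version of the memory-string conversion shown above.
    public class MemoryInputSketch {
        // Assumption: one gigabyte expressed in kilobytes, playing the role of CueUtil.GB.
        private static final long GB = 1024L * 1024L;

        static long toKilobytes(String input) {
            String value = input.toLowerCase();
            if (value.endsWith("m")) {
                double megs = Double.parseDouble(value.substring(0, value.length() - 1));
                return (long) (megs * 1024);
            } else if (value.endsWith("g")) {
                return Long.parseLong(value.substring(0, value.length() - 1)) * GB;
            } else {
                // Bare numbers are treated as gigabytes, as in the method above.
                return Long.parseLong(value) * GB;
            }
        }

        public static void main(String[] args) {
            System.out.println(toKilobytes("2048m")); // 2097152
            System.out.println(toKilobytes("4g"));    // 4194304
            System.out.println(toKilobytes("8"));     // 8388608
        }
    }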
+ if (minMemory > memReservedMax) { + logger.warn("Setting memory for " + buildableJob.detail.name + "/" + layer.name + + " to: " + memReservedMax); + layer.minimumMemory = memReservedMax; + } else if (minMemory < memReservedMin) { + logger.warn(buildableJob.detail.name + "/" + layer.name + + "Specified too little memory, defaulting to: " + memReservedMin); + minMemory = memReservedMin; } - limits.add(limitName); - } - } - logger.info("primary service: " + primaryService.getName() + " " + layer.getName()); + buildableLayer.isMemoryOverride = true; + layer.minimumMemory = minMemory; - /* - * Now apply the primaryService values to the layer. - */ - layer.isThreadable = primaryService.threadable; - layer.maximumCores = primaryService.maxCores; - layer.minimumCores = primaryService.minCores; - layer.minimumMemory = primaryService.minMemory; - layer.maximumGpus = primaryService.maxGpus; - layer.minimumGpus = primaryService.minGpus; - layer.minimumGpuMemory = primaryService.minGpuMemory; - layer.tags.addAll(primaryService.tags); - layer.services.addAll(services); - layer.limits.addAll(limits); - layer.timeout = primaryService.timeout; - layer.timeout_llu = primaryService.timeout_llu; - } - - private void determineOutputs(Element layerTag, BuildableJob job, LayerDetail layer) { - - Element t_outputs = layerTag.getChild("outputs"); - List outputs = new ArrayList(); - /* - * Build a list of outputs from the XML. Filter out duplicates and empty outputs. + } + + /** + * If the gpu_memory option is set, set minimumGpuMemory to that supplied value + * + * @param layerTag + * @param layer */ - if (t_outputs != null) { - for (Object tmp : t_outputs.getChildren()) { - Element t_output = (Element) tmp; - String output_path = t_output.getTextTrim(); + private void determineMinimumGpuMemory(BuildableJob buildableJob, Element layerTag, + LayerDetail layer) { - if (output_path.length() == 0) { - continue; + String gpu = layerTag.getChildTextTrim("gpu"); + String gpuMemory = layerTag.getChildTextTrim("gpu_memory"); + if (gpu == null && gpuMemory == null) { + return; } - if (outputs.contains(output_path)) { - continue; + String memory = null; + if (gpu != null) { + logger.warn(buildableJob.detail.name + "/" + layer.name + + " has the deprecated gpu. Use gpu_memory."); + memory = gpu.toLowerCase(); } - outputs.add(output_path); - } - } - layer.outputs.addAll(outputs); - } + if (gpuMemory != null) + memory = gpuMemory.toLowerCase(); - /** - * Converts the job space tagging format into a set of strings. Also verifies each tag. - * - * @param job - * @param layer - * @return - */ - private void determineTags(BuildableJob job, LayerDetail layer, Element layerTag) { - Set newTags = new LinkedHashSet(); - String tags = layerTag.getChildTextTrim("tags"); + long minGpuMemory; + try { + minGpuMemory = convertMemoryInput(memory); + long memGpuReservedMin = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + long memGpuReservedMax = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_max", Long.class); + + // Some quick sanity checks to make sure gpu memory hasn't gone + // over or under reasonable defaults. + if (minGpuMemory > memGpuReservedMax) { + throw new SpecBuilderException("Gpu memory requirements exceed " + + "maximum. 
Are you specifying the correct units?"); + } else if (minGpuMemory < memGpuReservedMin) { + logger.warn(buildableJob.detail.name + "/" + layer.name + + "Specified too little gpu memory, defaulting to: " + memGpuReservedMin); + minGpuMemory = memGpuReservedMin; + } + + layer.minimumGpuMemory = minGpuMemory; - if (tags == null) { - return; + } catch (Exception e) { + logger.info("Error setting gpu memory for " + buildableJob.detail.name + "/" + + layer.name + " failed, reason: " + e + ". Using default."); + layer.minimumGpuMemory = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_min", Long.class); + } } - if (tags.length() == 0) { - return; + /** + * Cores may be specified as a decimal or core points. + * + * If no core value is specified, we default to the value of + * Dispatcher.CORE_POINTS_RESERVED_DEFAULT + * + * If the value is specified but is less than the minimum allowed, then the value is reset to + * the default. + * + * If the value is specified but is greater than the max allowed, then the value is reset to the + * default. + * + */ + private void determineMinimumCores(Element layerTag, LayerDetail layer) { + + String cores = layerTag.getChildTextTrim("cores"); + if (cores == null) { + return; + } + + int corePoints = layer.minimumCores; + + if (cores.contains(".")) { + if (cores.contains("-")) { + corePoints = (int) (Double.valueOf(cores) * 100 - .5); + } else { + corePoints = (int) (Double.valueOf(cores) * 100 + .5); + } + } else { + corePoints = Integer.valueOf(cores); + } + + if (corePoints > 0 && corePoints < Dispatcher.CORE_POINTS_RESERVED_MIN) { + corePoints = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; + } else if (corePoints > Dispatcher.CORE_POINTS_RESERVED_MAX) { + corePoints = Dispatcher.CORE_POINTS_RESERVED_MAX; + } + + layer.minimumCores = corePoints; } - String[] e = tags.replaceAll(" ", "").split("\\|"); - for (String s : e) { - if (e.length == 0) { - continue; - } - Matcher matcher = NAME_PATTERN.matcher(s); - if (!matcher.matches()) { - throw new SpecBuilderException("error, invalid tag " + s - + ", tags must be alpha numberic and at least " + "3 characters in length."); - } - newTags.add(s); + /** + * Gpu is a int. + * + * If no gpu value is specified, we default to the value of Dispatcher.GPU_RESERVED_DEFAULT + */ + private void determineMinimumGpus(Element layerTag, LayerDetail layer) { + + String gpus = layerTag.getChildTextTrim("gpus"); + if (gpus != null) { + layer.minimumGpus = Integer.valueOf(gpus); + } } - if (newTags.size() > 0) { - layer.tags = newTags; + private void determineChunkSize(Element layerTag, LayerDetail layer) { + layer.chunkSize = Integer.parseInt(layerTag.getChildTextTrim("chunk")); } - } - /** - * Determine the frame range - * - * @param range - * @param chunkSize - * @return - */ - public int getFrameRangeSize(String range, int chunkSize) { - try { - return CueUtil.normalizeFrameRange(range, chunkSize).size(); - } catch (Exception e) { - throw new SpecBuilderException("error, the range " + range + " is invalid"); + /** + * Determine if the layer is threadable. A manually set threadable option in the job spec should + * override the service defaults. + * + * @param layerTag + * @param layer + */ + private void determineThreadable(Element layerTag, LayerDetail layer) { + // Must have at least 1 core to thread. 
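determineMinimumCores above accepts either whole core points or a decimal core count, scales decimals by 100 with a half-point rounding offset, and clamps the result against the Dispatcher limits. The sketch below is hypothetical; its MIN, MAX and DEFAULT values are placeholders rather than the real Dispatcher constants.

    // Hypothetical sketch of the cores-to-core-points conversion used above.
    public class CorePointsSketch {
        static final int RESERVED_MIN = 25;      // placeholder value
        static final int RESERVED_MAX = 1600;    // placeholder value
        static final int RESERVED_DEFAULT = 100; // placeholder value

        static int toCorePoints(String cores) {
            int points;
            if (cores.contains(".")) {
                // Decimal cores are scaled by 100; the +/- .5 offset mirrors the method above.
                points = cores.contains("-")
                        ? (int) (Double.parseDouble(cores) * 100 - .5)
                        : (int) (Double.parseDouble(cores) * 100 + .5);
            } else {
                points = Integer.parseInt(cores);
            }
            if (points > 0 && points < RESERVED_MIN) {
                points = RESERVED_DEFAULT;
            } else if (points > RESERVED_MAX) {
                points = RESERVED_MAX;
            }
            return points;
        }

        public static void main(String[] args) {
            System.out.println(toCorePoints("1.5")); // 150
            System.out.println(toCorePoints("2"));   // below the minimum, reset to 100
            System.out.println(toCorePoints("300")); // 300
        }
    }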
+ if (layer.minimumCores > 0 && layer.minimumCores < 100) { + layer.isThreadable = false; + } else if (layerTag.getChildTextTrim("threadable") != null) { + layer.isThreadable = Convert.stringToBool(layerTag.getChildTextTrim("threadable")); + } } - } - private BuildableDependency handleDependTag(Element tag) { + private void determineResourceDefaults(Element layerTag, BuildableJob job, LayerDetail layer) { - BuildableDependency depend = new BuildableDependency(); - depend.type = DependType.valueOf(tag.getAttributeValue("type").toUpperCase()); + Element t_services = layerTag.getChild("services"); + List services = new ArrayList(); - /* - * If the depend type is layer on layer, allow dependAny to be set. Depend any is not - * implemented for any other depend type. - */ - if (depend.type.equals(DependType.LAYER_ON_LAYER)) { - depend.anyFrame = Convert.stringToBool(tag.getAttributeValue("anyframe")); + /* + * Build a list of services from the XML. Filter out duplicates and empty services. + */ + if (t_services != null) { + + for (Object tmp : t_services.getChildren()) { + Element t_service = (Element) tmp; + String service_name = t_service.getTextTrim(); + + if (service_name.length() == 0) { + continue; + } + + if (services.contains(service_name)) { + continue; + } + services.add(service_name); + } + } + + /* + * Start from the beginning and check each service. The first one that has a service record + * will be the one to use. + */ + ServiceEntity primaryService = null; + for (String service_name : services) { + try { + primaryService = serviceManager.getService(service_name, job.detail.showName); + // Once a service is found, break; + break; + } catch (EmptyResultDataAccessException e) { + logger.warn("warning, service not found for layer " + layer.getName() + " " + + service_name); + } + } + + /* + * If no primary service was found, use the default service. + */ + if (primaryService == null) { + primaryService = serviceManager.getService(DEFAULT_SERVICE); + services.add(primaryService.name); + } + + Element t_limits = layerTag.getChild("limits"); + List limits = new ArrayList(); + + if (t_limits != null) { + for (Object tmp : t_limits.getChildren()) { + Element t_limit = (Element) tmp; + String limitName = t_limit.getTextTrim(); + + if (limitName.length() == 0) { + continue; + } + + if (limits.contains(limitName)) { + continue; + } + limits.add(limitName); + } + } + + logger.info("primary service: " + primaryService.getName() + " " + layer.getName()); + + /* + * Now apply the primaryService values to the layer. + */ + layer.isThreadable = primaryService.threadable; + layer.maximumCores = primaryService.maxCores; + layer.minimumCores = primaryService.minCores; + layer.minimumMemory = primaryService.minMemory; + layer.maximumGpus = primaryService.maxGpus; + layer.minimumGpus = primaryService.minGpus; + layer.minimumGpuMemory = primaryService.minGpuMemory; + layer.tags.addAll(primaryService.tags); + layer.services.addAll(services); + layer.limits.addAll(limits); + layer.timeout = primaryService.timeout; + layer.timeout_llu = primaryService.timeout_llu; + } + + private void determineOutputs(Element layerTag, BuildableJob job, LayerDetail layer) { + + Element t_outputs = layerTag.getChild("outputs"); + List outputs = new ArrayList(); + /* + * Build a list of outputs from the XML. Filter out duplicates and empty outputs. 
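determineResourceDefaults above walks the declared services in order, uses the first one that resolves to a service record, and falls back to DEFAULT_SERVICE when none do. The sketch below illustrates that first-match-with-fallback lookup under a simplified lookup interface; ServiceLookup is an illustrative stand-in, not the real ServiceManager API.

    import java.util.List;
    import java.util.Optional;

    // Hypothetical sketch of the first-match service resolution used above.
    public class ServiceResolutionSketch {
        interface ServiceLookup {
            Optional<String> find(String serviceName); // empty when the service is unknown
        }

        static String resolvePrimary(List<String> requested, ServiceLookup lookup,
                String defaultService) {
            for (String name : requested) {
                Optional<String> found = lookup.find(name);
                if (found.isPresent()) {
                    return found.get(); // first service with a record wins
                }
            }
            // Nothing matched: fall back to the default service.
            return lookup.find(defaultService).orElseThrow(
                    () -> new IllegalStateException("default service missing"));
        }

        public static void main(String[] args) {
            ServiceLookup lookup = name ->
                    name.equals("arnold") || name.equals("default")
                            ? Optional.of(name) : Optional.<String>empty();
            System.out.println(resolvePrimary(List.of("katana", "arnold"), lookup, "default")); // arnold
            System.out.println(resolvePrimary(List.of("katana"), lookup, "default"));           // default
        }
    }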
+ */ + if (t_outputs != null) { + for (Object tmp : t_outputs.getChildren()) { + Element t_output = (Element) tmp; + String output_path = t_output.getTextTrim(); + + if (output_path.length() == 0) { + continue; + } + + if (outputs.contains(output_path)) { + continue; + } + outputs.add(output_path); + } + } + layer.outputs.addAll(outputs); } - /* - * Set job names + /** + * Converts the job space tagging format into a set of strings. Also verifies each tag. + * + * @param job + * @param layer + * @return */ - depend.setDependErJobName(conformJobName(tag.getChildTextTrim("depjob"))); - depend.setDependOnJobName(conformJobName(tag.getChildTextTrim("onjob"))); + private void determineTags(BuildableJob job, LayerDetail layer, Element layerTag) { + Set newTags = new LinkedHashSet(); + String tags = layerTag.getChildTextTrim("tags"); - /* - * Set layer names - */ - String depLayer = tag.getChildTextTrim("deplayer"); - String onLayer = tag.getChildTextTrim("onlayer"); + if (tags == null) { + return; + } - if (depLayer != null) { - depend.setDependErLayerName(conformLayerName(depLayer)); - } - if (onLayer != null) { - depend.setDependOnLayerName(conformLayerName(onLayer)); + if (tags.length() == 0) { + return; + } + + String[] e = tags.replaceAll(" ", "").split("\\|"); + for (String s : e) { + if (e.length == 0) { + continue; + } + Matcher matcher = NAME_PATTERN.matcher(s); + if (!matcher.matches()) { + throw new SpecBuilderException( + "error, invalid tag " + s + ", tags must be alpha numberic and at least " + + "3 characters in length."); + } + newTags.add(s); + } + + if (newTags.size() > 0) { + layer.tags = newTags; + } } - /* - * Set frame names + /** + * Determine the frame range + * + * @param range + * @param chunkSize + * @return */ - String depFrame = tag.getChildTextTrim("depframe"); - String onFrame = tag.getChildTextTrim("onframe"); - - if (depFrame != null) { - depFrame = conformFrameName(depFrame); - depend.setDependErFrameName(depFrame); - } - if (onFrame != null) { - onFrame = conformFrameName(onFrame); - depend.setDependOnFrameName(onFrame); - } - - // double check to make sure we don't have two of the same frame/ - if (onFrame != null && depFrame != null) { - if (onFrame.equals(depFrame)) { - throw new SpecBuilderException("The frame name: " + depFrame + " cannot depend on itself."); - } - } - - return depend; - } - - /** - * Tags a env tag and populates the supplied map with key value pairs. - * - * @param tag - * @param map - */ - private void handleEnvironmentTag(Element tag, Map map) { - if (tag == null) { - return; - } - for (Object tmp : tag.getChildren()) { - Element envTag = (Element) tmp; - String key = envTag.getAttributeValue("name"); - if (key == null) { - continue; - } - map.put(key, envTag.getTextTrim()); - } - } - - public void parse(File file) { - try { - doc = new SAXBuilder(true).build(file); - - } catch (Exception e) { - throw new SpecBuilderException("Failed to parse job spec XML, " + e); - } - - handleSpecTag(); - handleJobsTag(); - handleDependsTags(); - } - - private class DTDRedirector implements EntityResolver { - public InputSource resolveEntity(String publicId, String systemId) - throws SAXException, IOException { - if (systemId.startsWith(SPCUE_DTD_URL)) { - // Redirect to resource file. 
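determineTags above strips spaces, splits the tag string on the pipe character, and validates each tag against NAME_PATTERN before applying the set to the layer. A hypothetical standalone sketch of that parsing:

    import java.util.LinkedHashSet;
    import java.util.Set;
    import java.util.regex.Pattern;

    // Hypothetical sketch of the pipe-separated tag parsing done by determineTags above.
    public class TagParsingSketch {
        private static final Pattern NAME = Pattern.compile("^([\\w\\.-]{3,})$");

        static Set<String> parseTags(String tags) {
            Set<String> result = new LinkedHashSet<>();
            if (tags == null || tags.isEmpty()) {
                return result;
            }
            for (String tag : tags.replaceAll(" ", "").split("\\|")) {
                if (tag.isEmpty()) {
                    continue; // skip empty entries
                }
                if (!NAME.matcher(tag).matches()) {
                    throw new IllegalArgumentException("invalid tag: " + tag);
                }
                result.add(tag);
            }
            return result;
        }

        public static void main(String[] args) {
            System.out.println(parseTags("general | desktop | gpu")); // [general, desktop, gpu]
        }
    }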
+ public int getFrameRangeSize(String range, int chunkSize) { try { - String filename = systemId.substring(SPCUE_DTD_URL.length()); - InputStream dtd = getClass().getResourceAsStream("/public/dtd/" + filename); - return new InputSource(dtd); + return CueUtil.normalizeFrameRange(range, chunkSize).size(); } catch (Exception e) { - throw new SpecBuilderException("Failed to redirect DTD " + systemId + ", " + e); + throw new SpecBuilderException("error, the range " + range + " is invalid"); } - } else { - // Use default resolver. - return null; - } } - } - public void parse(String cjsl) { - try { - SAXBuilder builder = new SAXBuilder(true); - builder.setEntityResolver(new DTDRedirector()); - doc = builder.build(new StringReader(cjsl)); + private BuildableDependency handleDependTag(Element tag) { + + BuildableDependency depend = new BuildableDependency(); + depend.type = DependType.valueOf(tag.getAttributeValue("type").toUpperCase()); + + /* + * If the depend type is layer on layer, allow dependAny to be set. Depend any is not + * implemented for any other depend type. + */ + if (depend.type.equals(DependType.LAYER_ON_LAYER)) { + depend.anyFrame = Convert.stringToBool(tag.getAttributeValue("anyframe")); + } + + /* + * Set job names + */ + depend.setDependErJobName(conformJobName(tag.getChildTextTrim("depjob"))); + depend.setDependOnJobName(conformJobName(tag.getChildTextTrim("onjob"))); + + /* + * Set layer names + */ + String depLayer = tag.getChildTextTrim("deplayer"); + String onLayer = tag.getChildTextTrim("onlayer"); + + if (depLayer != null) { + depend.setDependErLayerName(conformLayerName(depLayer)); + } + if (onLayer != null) { + depend.setDependOnLayerName(conformLayerName(onLayer)); + } + + /* + * Set frame names + */ + String depFrame = tag.getChildTextTrim("depframe"); + String onFrame = tag.getChildTextTrim("onframe"); - } catch (Exception e) { - throw new SpecBuilderException("Failed to parse job spec XML, " + e); + if (depFrame != null) { + depFrame = conformFrameName(depFrame); + depend.setDependErFrameName(depFrame); + } + if (onFrame != null) { + onFrame = conformFrameName(onFrame); + depend.setDependOnFrameName(onFrame); + } + + // double check to make sure we don't have two of the same frame/ + if (onFrame != null && depFrame != null) { + if (onFrame.equals(depFrame)) { + throw new SpecBuilderException( + "The frame name: " + depFrame + " cannot depend on itself."); + } + } + + return depend; } - handleSpecTag(); - handleJobsTag(); - handleDependsTags(); - } + /** + * Tags a env tag and populates the supplied map with key value pairs. 
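handleDependTag above conforms frame names with conformFrameName, which requires a four to six digit frame number, a dash, and a layer name, and re-pads the number to four digits. A hypothetical sketch of that normalization follows; the lower-casing here stands in for the full conformLayerName call.

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Hypothetical sketch of the frame-name normalization used by conformFrameName above.
    public class FrameNameSketch {
        // Same expression as FRAME_NAME_REGEX: 4-6 digits, a dash, then a layer name.
        private static final Pattern FRAME = Pattern.compile("^([\\d]{4,6})-([\\w]+)$");

        static String conform(String name) {
            Matcher m = FRAME.matcher(name);
            if (!m.matches()) {
                throw new IllegalArgumentException("bad frame name: " + name);
            }
            // Re-pad the frame number to four digits and keep the layer suffix.
            return String.format("%04d-%s", Integer.valueOf(m.group(1)), m.group(2).toLowerCase());
        }

        public static void main(String[] args) {
            System.out.println(conform("000010-render")); // 0010-render
        }
    }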
+ * + * @param tag + * @param map + */ + private void handleEnvironmentTag(Element tag, Map map) { + if (tag == null) { + return; + } + for (Object tmp : tag.getChildren()) { + Element envTag = (Element) tmp; + String key = envTag.getAttributeValue("name"); + if (key == null) { + continue; + } + map.put(key, envTag.getTextTrim()); + } + } - private BuildableJob initPostJob(BuildableJob parent) { + public void parse(File file) { + try { + doc = new SAXBuilder(true).build(file); - JobDetail job = new JobDetail(); - job.name = parent.detail.name + "_post_job_" + System.currentTimeMillis(); - job.name = job.name.replace(user, "monitor"); - job.state = JobState.STARTUP; - job.isPaused = false; - job.maxCoreUnits = 500; - job.startTime = CueUtil.getTime(); - job.maxRetries = 2; - job.shot = shot; - job.user = "monitor"; - job.uid = uid; - job.email = null; - job.os = parent.detail.os; + } catch (Exception e) { + throw new SpecBuilderException("Failed to parse job spec XML, " + e); + } - job.showName = show; - job.facilityName = facility; - job.deptName = parent.detail.deptName; + handleSpecTag(); + handleJobsTag(); + handleDependsTags(); + } + + private class DTDRedirector implements EntityResolver { + public InputSource resolveEntity(String publicId, String systemId) + throws SAXException, IOException { + if (systemId.startsWith(SPCUE_DTD_URL)) { + // Redirect to resource file. + try { + String filename = systemId.substring(SPCUE_DTD_URL.length()); + InputStream dtd = getClass().getResourceAsStream("/public/dtd/" + filename); + return new InputSource(dtd); + } catch (Exception e) { + throw new SpecBuilderException("Failed to redirect DTD " + systemId + ", " + e); + } + } else { + // Use default resolver. + return null; + } + } + } - BuildableJob postJob = new BuildableJob(job); + public void parse(String cjsl) { + try { + SAXBuilder builder = new SAXBuilder(true); + builder.setEntityResolver(new DTDRedirector()); + doc = builder.build(new StringReader(cjsl)); - for (String key : parent.env.keySet()) { - postJob.env.put(key, parent.env.get(key)); + } catch (Exception e) { + throw new SpecBuilderException("Failed to parse job spec XML, " + e); + } + + handleSpecTag(); + handleJobsTag(); + handleDependsTags(); } - return postJob; - } + private BuildableJob initPostJob(BuildableJob parent) { - public Document getDoc() { - return doc; - } + JobDetail job = new JobDetail(); + job.name = parent.detail.name + "_post_job_" + System.currentTimeMillis(); + job.name = job.name.replace(user, "monitor"); + job.state = JobState.STARTUP; + job.isPaused = false; + job.maxCoreUnits = 500; + job.startTime = CueUtil.getTime(); + job.maxRetries = 2; + job.shot = shot; + job.user = "monitor"; + job.uid = uid; + job.email = null; + job.os = parent.detail.os; - public List getDepends() { - return depends; - } + job.showName = show; + job.facilityName = facility; + job.deptName = parent.detail.deptName; - public List getJobs() { - return jobs; - } + BuildableJob postJob = new BuildableJob(job); - public String getShot() { - return shot; - } + for (String key : parent.env.keySet()) { + postJob.env.put(key, parent.env.get(key)); + } - public String getShow() { - return show; - } + return postJob; + } - public Optional getUid() { - return uid; - } + public Document getDoc() { + return doc; + } - public String getUser() { - return user; - } + public List getDepends() { + return depends; + } - public ServiceManager getServiceManager() { - return serviceManager; - } + public List getJobs() { + return jobs; + } + + public 
String getShot() { + return shot; + } - public void setServiceManager(ServiceManager serviceManager) { - this.serviceManager = serviceManager; - } + public String getShow() { + return show; + } + + public Optional getUid() { + return uid; + } + + public String getUser() { + return user; + } + + public ServiceManager getServiceManager() { + return serviceManager; + } + + public void setServiceManager(ServiceManager serviceManager) { + this.serviceManager = serviceManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java index efaecc4f5..8448fcb74 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java @@ -33,144 +33,147 @@ */ public class LocalBookingSupport { - private static final Logger logger = LogManager.getLogger(LocalBookingSupport.class); - - private HostManager hostManager; - private LocalDispatcher localDispatcher; - private OwnerManager ownerManager; - private BookingManager bookingManager; - - public boolean bookLocal(JobInterface job, String hostname, String user, - LocalHostAssignment lha) { + private static final Logger logger = LogManager.getLogger(LocalBookingSupport.class); + + private HostManager hostManager; + private LocalDispatcher localDispatcher; + private OwnerManager ownerManager; + private BookingManager bookingManager; + + public boolean bookLocal(JobInterface job, String hostname, String user, + LocalHostAssignment lha) { + + logger.info("Setting up local booking for " + user + " on " + job); + + DispatchHost host = hostManager.findDispatchHost(hostname); + if (host.lockState.equals(LockState.OPEN)) { + throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); + } + + OwnerEntity owner = ownerManager.findOwner(user); + if (!ownerManager.isOwner(owner, host)) { + throw new SpcueRuntimeException( + user + " is not the owner of the host " + host.getName()); + } + + bookingManager.createLocalHostAssignment(host, job, lha); + + try { + if (localDispatcher.dispatchHost(host, job).size() > 0) { + return true; + } + } catch (Exception e) { + /* + * Eat everything here and we'll throw our own ice exception. 
+ */ + logger.info("addRenderPartition to job " + job + " failed, " + e); + } + + logger.info("bookLocal failed to book " + host + " to " + job + + ", there were no suitable frames to book."); + + return false; + } - logger.info("Setting up local booking for " + user + " on " + job); + public boolean bookLocal(LayerInterface layer, String hostname, String user, + LocalHostAssignment lha) { - DispatchHost host = hostManager.findDispatchHost(hostname); - if (host.lockState.equals(LockState.OPEN)) { - throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); - } + logger.info("Setting up local booking for " + user + " on " + layer); - OwnerEntity owner = ownerManager.findOwner(user); - if (!ownerManager.isOwner(owner, host)) { - throw new SpcueRuntimeException(user + " is not the owner of the host " + host.getName()); - } + DispatchHost host = hostManager.findDispatchHost(hostname); + if (host.lockState.equals(LockState.OPEN)) { + throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); + } - bookingManager.createLocalHostAssignment(host, job, lha); - - try { - if (localDispatcher.dispatchHost(host, job).size() > 0) { - return true; - } - } catch (Exception e) { - /* - * Eat everything here and we'll throw our own ice exception. - */ - logger.info("addRenderPartition to job " + job + " failed, " + e); - } + OwnerEntity owner = ownerManager.findOwner(user); + if (!ownerManager.isOwner(owner, host)) { + throw new SpcueRuntimeException( + user + " is not the owner of the host " + host.getName()); + } - logger.info("bookLocal failed to book " + host + " to " + job - + ", there were no suitable frames to book."); + bookingManager.createLocalHostAssignment(host, layer, lha); - return false; - } + try { + if (localDispatcher.dispatchHost(host, layer).size() > 0) { + return true; + } + } catch (Exception e) { + /* + * Eat everything here and we'll throw our own ice exception. + */ + logger.info("addRenderPartition to job " + layer + " failed, " + e); + } - public boolean bookLocal(LayerInterface layer, String hostname, String user, - LocalHostAssignment lha) { + logger.info("bookLocafailed to book " + host + " to " + layer + + ", there were no suitable frames to book."); - logger.info("Setting up local booking for " + user + " on " + layer); + return false; - DispatchHost host = hostManager.findDispatchHost(hostname); - if (host.lockState.equals(LockState.OPEN)) { - throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); } - OwnerEntity owner = ownerManager.findOwner(user); - if (!ownerManager.isOwner(owner, host)) { - throw new SpcueRuntimeException(user + " is not the owner of the host " + host.getName()); + public boolean bookLocal(FrameInterface frame, String hostname, String user, + LocalHostAssignment lha) { + + logger.info("Setting up local booking for " + user + " on " + frame); + + DispatchHost host = hostManager.findDispatchHost(hostname); + if (host.lockState.equals(LockState.OPEN)) { + throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); + } + + OwnerEntity owner = ownerManager.findOwner(user); + if (!ownerManager.isOwner(owner, host)) { + throw new SpcueRuntimeException( + user + " is not the owner of the host " + host.getName()); + } + + bookingManager.createLocalHostAssignment(host, frame, lha); + try { + if (localDispatcher.dispatchHost(host, frame).size() > 0) { + return true; + } + } catch (Exception e) { + /* + * Eat everything here and we'll throw our own ice exception. 
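Each bookLocal overload above follows the same guard sequence: require the host to be NIMBY locked, require the requesting user to own the host, create the local host assignment, then attempt the dispatch and report whether any frames were booked. The sketch below outlines that flow with illustrative stand-in interfaces, not the real manager APIs.

    // Hypothetical outline of the guard sequence used by the bookLocal overloads above.
    // Host, Booking and Dispatching are illustrative stand-ins, not the real APIs.
    public class LocalBookingFlowSketch {
        interface Host { boolean isNimbyLocked(); boolean isOwnedBy(String user); String name(); }
        interface Booking { void createAssignment(Host host); }
        interface Dispatching { int dispatch(Host host); } // returns number of frames booked

        static boolean bookLocal(Host host, String user, Booking booking, Dispatching dispatcher) {
            if (!host.isNimbyLocked()) {
                throw new IllegalStateException("The host " + host.name() + " is not NIMBY locked");
            }
            if (!host.isOwnedBy(user)) {
                throw new IllegalStateException(user + " is not the owner of " + host.name());
            }
            booking.createAssignment(host);
            try {
                return dispatcher.dispatch(host) > 0;
            } catch (Exception e) {
                // Swallow dispatch errors, as the methods above do, and report failure.
                return false;
            }
        }
    }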
+ */ + logger.info("addRenderPartition to job " + frame + " failed, " + e); + } + + logger.info("bookLocafailed to book " + host + " to " + frame + + ", there were no suitable frames to book."); + + return false; } - bookingManager.createLocalHostAssignment(host, layer, lha); - - try { - if (localDispatcher.dispatchHost(host, layer).size() > 0) { - return true; - } - } catch (Exception e) { - /* - * Eat everything here and we'll throw our own ice exception. - */ - logger.info("addRenderPartition to job " + layer + " failed, " + e); + public HostManager getHostManager() { + return hostManager; } - logger.info("bookLocafailed to book " + host + " to " + layer - + ", there were no suitable frames to book."); - - return false; - - } - - public boolean bookLocal(FrameInterface frame, String hostname, String user, - LocalHostAssignment lha) { - - logger.info("Setting up local booking for " + user + " on " + frame); - - DispatchHost host = hostManager.findDispatchHost(hostname); - if (host.lockState.equals(LockState.OPEN)) { - throw new SpcueRuntimeException("The host " + host + " is not NIMBY locked"); + public void setHostManager(HostManager hostManager) { + this.hostManager = hostManager; } - OwnerEntity owner = ownerManager.findOwner(user); - if (!ownerManager.isOwner(owner, host)) { - throw new SpcueRuntimeException(user + " is not the owner of the host " + host.getName()); + public LocalDispatcher getLocalDispatcher() { + return localDispatcher; } - bookingManager.createLocalHostAssignment(host, frame, lha); - try { - if (localDispatcher.dispatchHost(host, frame).size() > 0) { - return true; - } - } catch (Exception e) { - /* - * Eat everything here and we'll throw our own ice exception. - */ - logger.info("addRenderPartition to job " + frame + " failed, " + e); + public void setLocalDispatcher(LocalDispatcher localDispatcher) { + this.localDispatcher = localDispatcher; } - logger.info("bookLocafailed to book " + host + " to " + frame - + ", there were no suitable frames to book."); - - return false; - } - - public HostManager getHostManager() { - return hostManager; - } - - public void setHostManager(HostManager hostManager) { - this.hostManager = hostManager; - } - - public LocalDispatcher getLocalDispatcher() { - return localDispatcher; - } - - public void setLocalDispatcher(LocalDispatcher localDispatcher) { - this.localDispatcher = localDispatcher; - } - - public OwnerManager getOwnerManager() { - return ownerManager; - } + public OwnerManager getOwnerManager() { + return ownerManager; + } - public void setOwnerManager(OwnerManager ownerManager) { - this.ownerManager = ownerManager; - } + public void setOwnerManager(OwnerManager ownerManager) { + this.ownerManager = ownerManager; + } - public BookingManager getBookingManager() { - return bookingManager; - } + public BookingManager getBookingManager() { + return bookingManager; + } - public void setBookingManager(BookingManager bookingManager) { - this.bookingManager = bookingManager; - } + public void setBookingManager(BookingManager bookingManager) { + this.bookingManager = bookingManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java index f51b48a18..fff3d1a41 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java @@ -42,243 +42,246 @@ public class MaintenanceManagerSupport { 
- private static final Logger logger = LogManager.getLogger(MaintenanceManagerSupport.class); - - @Autowired - private Environment env; - - private MaintenanceDao maintenanceDao; - - private ProcDao procDao; - - private FrameDao frameDao; - - private HostDao hostDao; - - private JobManager jobManager; - - private DispatchSupport dispatchSupport; - - private HistoricalSupport historicalSupport; - - private DepartmentManager departmentManager; - - private static final long WAIT_FOR_HOST_REPORTS_MS = 600000; - - private static final int CHECKPOINT_MAX_WAIT_SEC = 300; - - private long dbConnectionFailureTime = 0; - - /** - * Checks the cue for down hosts. If there are any down they are cleared of procs. Additionally - * the orphaned proc check is done. - * - * If a DB Connection exception is thrown, its caught and the current time is noted. Once the DB - * comes back up, down proc checks will not resume for WAIT_FOR_HOST_REPORTS_MS milliseconds. This - * is to give procs a chance to report back in. - * - */ - public void checkHardwareState() { - try { + private static final Logger logger = LogManager.getLogger(MaintenanceManagerSupport.class); + + @Autowired + private Environment env; + + private MaintenanceDao maintenanceDao; + + private ProcDao procDao; + + private FrameDao frameDao; + + private HostDao hostDao; + + private JobManager jobManager; + + private DispatchSupport dispatchSupport; + + private HistoricalSupport historicalSupport; + + private DepartmentManager departmentManager; + + private static final long WAIT_FOR_HOST_REPORTS_MS = 600000; + + private static final int CHECKPOINT_MAX_WAIT_SEC = 300; + + private long dbConnectionFailureTime = 0; + + /** + * Checks the cue for down hosts. If there are any down they are cleared of procs. Additionally + * the orphaned proc check is done. + * + * If a DB Connection exception is thrown, its caught and the current time is noted. Once the DB + * comes back up, down proc checks will not resume for WAIT_FOR_HOST_REPORTS_MS milliseconds. + * This is to give procs a chance to report back in. + * + */ + public void checkHardwareState() { + try { + + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK)) { + return; + } + try { + if (dbConnectionFailureTime > 0) { + if (System.currentTimeMillis() + - dbConnectionFailureTime < WAIT_FOR_HOST_REPORTS_MS) { + logger.warn( + "NOT running checkHardwareState, waiting for hosts to report in."); + return; + } + dbConnectionFailureTime = 0; + } + + int hosts = maintenanceDao.setUpHostsToDown(); + if (hosts > 0) { + clearDownProcs(); + + boolean autoDeleteDownHosts = env.getProperty( + "maintenance.auto_delete_down_hosts", Boolean.class, false); + if (autoDeleteDownHosts) { + hostDao.deleteDownHosts(); + } + } + clearOrphanedProcs(); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK); + } + } catch (Exception e) { + // This catch could be more specific using CannotGetJdbcConnectionException, but + // we need + // to catch a wider range of exceptions from HikariPool. + // HikariPool will log this message very frequently with error level, the + // following check + // avoids polluting the logs by logging it twice + if (!e.getMessage().contains("Exception during pool initialization")) { + logger.warn("Error obtaining DB connection for hardware state check", e); + } + // If this fails, then the network went down, set the current time. 
+ dbConnectionFailureTime = System.currentTimeMillis(); + } + } - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK)) { - return; - } - try { - if (dbConnectionFailureTime > 0) { - if (System.currentTimeMillis() - dbConnectionFailureTime < WAIT_FOR_HOST_REPORTS_MS) { - logger.warn("NOT running checkHardwareState, waiting for hosts to report in."); + public void archiveFinishedJobs() { + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)) { return; - } - dbConnectionFailureTime = 0; } + try { + historicalSupport.archiveHistoricalJobData(); + } catch (Exception e) { + logger.warn("failed to archive finished jobs: " + e); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); + } + } - int hosts = maintenanceDao.setUpHostsToDown(); - if (hosts > 0) { - clearDownProcs(); + private void clearOrphanedProcs() { + List procs = procDao.findOrphanedVirtualProcs(100); + for (VirtualProc proc : procs) { + try { + dispatchSupport.lostProc(proc, "Removed by maintenance, orphaned", + Dispatcher.EXIT_STATUS_FRAME_ORPHAN); + + Sentry.configureScope(scope -> { + scope.setExtra("frame_id", proc.getFrameId()); + scope.setExtra("host_id", proc.getHostId()); + scope.setExtra("name", proc.getName()); + Sentry.captureMessage("Manager cleaning orphan procs"); + }); + } catch (Exception e) { + logger.info("failed to clear orphaned proc: " + proc.getName() + " " + e); + } + } - boolean autoDeleteDownHosts = - env.getProperty("maintenance.auto_delete_down_hosts", Boolean.class, false); - if (autoDeleteDownHosts) { - hostDao.deleteDownHosts(); - } + List frames = frameDao.getOrphanedFrames(); + for (FrameInterface frame : frames) { + try { + frameDao.updateFrameStopped(frame, FrameState.WAITING, + Dispatcher.EXIT_STATUS_FRAME_ORPHAN); + } catch (Exception e) { + logger.info("failed to clear orphaned frame: " + frame.getName() + " " + e); + } } - clearOrphanedProcs(); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_HARDWARE_STATE_CHECK); - } - } catch (Exception e) { - // This catch could be more specific using CannotGetJdbcConnectionException, but - // we need - // to catch a wider range of exceptions from HikariPool. - // HikariPool will log this message very frequently with error level, the - // following check - // avoids polluting the logs by logging it twice - if (!e.getMessage().contains("Exception during pool initialization")) { - logger.warn("Error obtaining DB connection for hardware state check", e); - } - // If this fails, then the network went down, set the current time. 
- dbConnectionFailureTime = System.currentTimeMillis(); } - } - public void archiveFinishedJobs() { - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)) { - return; - } - try { - historicalSupport.archiveHistoricalJobData(); - } catch (Exception e) { - logger.warn("failed to archive finished jobs: " + e); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); - } - } - - private void clearOrphanedProcs() { - List procs = procDao.findOrphanedVirtualProcs(100); - for (VirtualProc proc : procs) { - try { - dispatchSupport.lostProc(proc, "Removed by maintenance, orphaned", - Dispatcher.EXIT_STATUS_FRAME_ORPHAN); - - Sentry.configureScope(scope -> { - scope.setExtra("frame_id", proc.getFrameId()); - scope.setExtra("host_id", proc.getHostId()); - scope.setExtra("name", proc.getName()); - Sentry.captureMessage("Manager cleaning orphan procs"); - }); - } catch (Exception e) { - logger.info("failed to clear orphaned proc: " + proc.getName() + " " + e); - } + private void clearDownProcs() { + List procs = procDao.findVirtualProcs(HardwareState.DOWN); + logger.warn("found " + procs.size() + " that are down."); + for (VirtualProc proc : procs) { + try { + dispatchSupport.lostProc(proc, proc.getName() + " was marked as down.", + Dispatcher.EXIT_STATUS_DOWN_HOST); + FrameInterface f = frameDao.getFrame(proc.frameId); + FrameDetail frameDetail = frameDao.getFrameDetail(f); + Sentry.configureScope(scope -> { + scope.setExtra("host", proc.getName()); + scope.setExtra("procId", proc.getProcId()); + scope.setExtra("frame Name", frameDetail.getName()); + scope.setExtra("frame Exit Status", String.valueOf(frameDetail.exitStatus)); + scope.setExtra("Frame Job ID", frameDetail.getJobId()); + Sentry.captureMessage("MaintenanceManager proc removed due to host offline"); + }); + } catch (Exception e) { + logger.info("failed to down proc: " + proc.getName() + " " + e); + } + } } - List frames = frameDao.getOrphanedFrames(); - for (FrameInterface frame : frames) { - try { - frameDao.updateFrameStopped(frame, FrameState.WAITING, Dispatcher.EXIT_STATUS_FRAME_ORPHAN); - } catch (Exception e) { - logger.info("failed to clear orphaned frame: " + frame.getName() + " " + e); - } - } - } - - private void clearDownProcs() { - List procs = procDao.findVirtualProcs(HardwareState.DOWN); - logger.warn("found " + procs.size() + " that are down."); - for (VirtualProc proc : procs) { - try { - dispatchSupport.lostProc(proc, proc.getName() + " was marked as down.", - Dispatcher.EXIT_STATUS_DOWN_HOST); - FrameInterface f = frameDao.getFrame(proc.frameId); - FrameDetail frameDetail = frameDao.getFrameDetail(f); - Sentry.configureScope(scope -> { - scope.setExtra("host", proc.getName()); - scope.setExtra("procId", proc.getProcId()); - scope.setExtra("frame Name", frameDetail.getName()); - scope.setExtra("frame Exit Status", String.valueOf(frameDetail.exitStatus)); - scope.setExtra("Frame Job ID", frameDetail.getJobId()); - Sentry.captureMessage("MaintenanceManager proc removed due to host offline"); - }); - } catch (Exception e) { - logger.info("failed to down proc: " + proc.getName() + " " + e); - } + public void clearStaleCheckpoints() { + logger.info("Checking for stale checkpoint frames."); + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT)) { + return; + } + try { + List frames = jobManager.getStaleCheckpoints(CHECKPOINT_MAX_WAIT_SEC); + logger.warn("found " + frames.size() + " frames that failed to checkpoint"); + for (FrameInterface frame : frames) { + 
jobManager.updateCheckpointState(frame, CheckpointState.DISABLED); + jobManager.updateFrameState(frame, FrameState.WAITING); + } + } catch (Exception e) { + logger.warn("failed to unlock stale checkpoint " + e); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT); + } } - } - public void clearStaleCheckpoints() { - logger.info("Checking for stale checkpoint frames."); - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT)) { - return; - } - try { - List frames = jobManager.getStaleCheckpoints(CHECKPOINT_MAX_WAIT_SEC); - logger.warn("found " + frames.size() + " frames that failed to checkpoint"); - for (FrameInterface frame : frames) { - jobManager.updateCheckpointState(frame, CheckpointState.DISABLED); - jobManager.updateFrameState(frame, FrameState.WAITING); - } - } catch (Exception e) { - logger.warn("failed to unlock stale checkpoint " + e); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_STALE_CHECKPOINT); + public void updateTaskValues() { + if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_TASK_UPDATE, 700)) { + return; + } + try { + logger.info("running task updates"); + for (PointDetail pd : departmentManager.getManagedPointConfs()) { + departmentManager.updateManagedTasks(pd); + } + } catch (Exception e) { + logger.warn("failed to archive finished jobs: " + e); + } finally { + maintenanceDao.unlockTask(MaintenanceTask.LOCK_TASK_UPDATE); + } } - } - public void updateTaskValues() { - if (!maintenanceDao.lockTask(MaintenanceTask.LOCK_TASK_UPDATE, 700)) { - return; + public FrameDao getFrameDao() { + return frameDao; } - try { - logger.info("running task updates"); - for (PointDetail pd : departmentManager.getManagedPointConfs()) { - departmentManager.updateManagedTasks(pd); - } - } catch (Exception e) { - logger.warn("failed to archive finished jobs: " + e); - } finally { - maintenanceDao.unlockTask(MaintenanceTask.LOCK_TASK_UPDATE); - } - } - - public FrameDao getFrameDao() { - return frameDao; - } - public void setFrameDao(FrameDao frameDao) { - this.frameDao = frameDao; - } + public void setFrameDao(FrameDao frameDao) { + this.frameDao = frameDao; + } - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } - public DispatchSupport getDispatchSupport() { - return dispatchSupport; - } + public DispatchSupport getDispatchSupport() { + return dispatchSupport; + } - public void setDispatchSupport(DispatchSupport dispatchSupport) { - this.dispatchSupport = dispatchSupport; - } + public void setDispatchSupport(DispatchSupport dispatchSupport) { + this.dispatchSupport = dispatchSupport; + } - public MaintenanceDao getMaintenanceDao() { - return maintenanceDao; - } + public MaintenanceDao getMaintenanceDao() { + return maintenanceDao; + } - public void setMaintenanceDao(MaintenanceDao maintenanceDao) { - this.maintenanceDao = maintenanceDao; - } + public void setMaintenanceDao(MaintenanceDao maintenanceDao) { + this.maintenanceDao = maintenanceDao; + } - public ProcDao getProcDao() { - return procDao; - } + public ProcDao getProcDao() { + return procDao; + } - public void setProcDao(ProcDao procDao) { - this.procDao = procDao; - } + public void setProcDao(ProcDao procDao) { + this.procDao = procDao; + } - public HistoricalSupport getHistoricalSupport() { - return historicalSupport; - } + public HistoricalSupport getHistoricalSupport() { + return historicalSupport; + } - public void setHistoricalSupport(HistoricalSupport historicalSupport) { 
- this.historicalSupport = historicalSupport; - } + public void setHistoricalSupport(HistoricalSupport historicalSupport) { + this.historicalSupport = historicalSupport; + } - public DepartmentManager getDepartmentManager() { - return departmentManager; - } + public DepartmentManager getDepartmentManager() { + return departmentManager; + } - public void setDepartmentManager(DepartmentManager departmentManager) { - this.departmentManager = departmentManager; - } + public void setDepartmentManager(DepartmentManager departmentManager) { + this.departmentManager = departmentManager; + } - public JobManager getJobManager() { - return jobManager; - } + public JobManager getJobManager() { + return jobManager; + } - public void setJobManager(JobManager jobManager) { - this.jobManager = jobManager; - } + public void setJobManager(JobManager jobManager) { + this.jobManager = jobManager; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java index aa8ac3a3f..984ea54f3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java @@ -23,77 +23,77 @@ public interface OwnerManager { - /** - * Return true if the given users owns the particular host. - * - * @param owner - * @param host - * @return - */ - boolean isOwner(OwnerEntity owner, HostInterface host); + /** + * Return true if the given users owns the particular host. + * + * @param owner + * @param host + * @return + */ + boolean isOwner(OwnerEntity owner, HostInterface host); - /** - * Create a new owner. - * - * @param user - * @param email - */ - OwnerEntity createOwner(String user, ShowInterface show); + /** + * Create a new owner. + * + * @param user + * @param email + */ + OwnerEntity createOwner(String user, ShowInterface show); - /** - * Get an owner record by ID. - * - * @param id - */ - OwnerEntity getOwner(String id); + /** + * Get an owner record by ID. + * + * @param id + */ + OwnerEntity getOwner(String id); - /** - * Return an owner record by name. - * - * @param name - */ - OwnerEntity findOwner(String name); + /** + * Return an owner record by name. + * + * @param name + */ + OwnerEntity findOwner(String name); - /** - * Delete the specified owner and all his/her deeds. Return true if the owner was actually - * deleted. False if not. - */ - boolean deleteOwner(Entity owner); + /** + * Delete the specified owner and all his/her deeds. Return true if the owner was actually + * deleted. False if not. + */ + boolean deleteOwner(Entity owner); - /** - * Set the show of the given user. - * - * @param owner - * @param show - */ - void setShow(Entity owner, ShowInterface show); + /** + * Set the show of the given user. + * + * @param owner + * @param show + */ + void setShow(Entity owner, ShowInterface show); - /** - * Assigns the given host to the owner. - * - * @param owner - * @param host - */ - DeedEntity takeOwnership(OwnerEntity owner, HostInterface host); + /** + * Assigns the given host to the owner. + * + * @param owner + * @param host + */ + DeedEntity takeOwnership(OwnerEntity owner, HostInterface host); - /** - * - * @param id - * @return - */ - DeedEntity getDeed(String id); + /** + * + * @param id + * @return + */ + DeedEntity getDeed(String id); - /** - * Deletes a deed for the specified host. - * - * @param host - */ - void removeDeed(HostInterface host); + /** + * Deletes a deed for the specified host. 
+ * + * @param host + */ + void removeDeed(HostInterface host); - /** - * Remove the given deed. - * - * @param deed - */ - void removeDeed(DeedEntity deed); + /** + * Remove the given deed. + * + * @param deed + */ + void removeDeed(DeedEntity deed); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java index 9bc87a565..40acecd28 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java @@ -30,88 +30,88 @@ @Transactional public class OwnerManagerService implements OwnerManager { - private OwnerDao ownerDao; - private DeedDao deedDao; - private HostDao hostDao; - - @Override - public OwnerEntity createOwner(String user, ShowInterface show) { - OwnerEntity owner = new OwnerEntity(user); - ownerDao.insertOwner(owner, show); - return owner; - } - - @Override - public boolean deleteOwner(Entity owner) { - return ownerDao.deleteOwner(owner); - } - - @Override - public OwnerEntity findOwner(String name) { - return ownerDao.findOwner(name); - } - - @Override - public OwnerEntity getOwner(String id) { - return ownerDao.getOwner(id); - } - - @Override - public void setShow(Entity owner, ShowInterface show) { - ownerDao.updateShow(owner, show); - } - - @Override - public DeedEntity getDeed(String id) { - return deedDao.getDeed(id); - } - - @Override - public DeedEntity takeOwnership(OwnerEntity owner, HostInterface host) { - if (!hostDao.isNimbyHost(host)) { - throw new SpcueRuntimeException("Cannot setup deeeds on non-NIMBY hosts."); - } - - deedDao.deleteDeed(host); - return deedDao.insertDeed(owner, host); - } - - @Override - public void removeDeed(HostInterface host) { - deedDao.deleteDeed(host); - } - - @Override - public void removeDeed(DeedEntity deed) { - deedDao.deleteDeed(deed); - } - - @Override - public boolean isOwner(OwnerEntity owner, HostInterface host) { - return ownerDao.isOwner(owner, host); - } - - public OwnerDao getOwnerDao() { - return ownerDao; - } - - public void setOwnerDao(OwnerDao ownerDao) { - this.ownerDao = ownerDao; - } - - public DeedDao getDeedDao() { - return deedDao; - } - - public void setDeedDao(DeedDao deedDao) { - this.deedDao = deedDao; - } - - public HostDao getHostDao() { - return hostDao; - } - - public void setHostDao(HostDao hostDao) { - this.hostDao = hostDao; - } + private OwnerDao ownerDao; + private DeedDao deedDao; + private HostDao hostDao; + + @Override + public OwnerEntity createOwner(String user, ShowInterface show) { + OwnerEntity owner = new OwnerEntity(user); + ownerDao.insertOwner(owner, show); + return owner; + } + + @Override + public boolean deleteOwner(Entity owner) { + return ownerDao.deleteOwner(owner); + } + + @Override + public OwnerEntity findOwner(String name) { + return ownerDao.findOwner(name); + } + + @Override + public OwnerEntity getOwner(String id) { + return ownerDao.getOwner(id); + } + + @Override + public void setShow(Entity owner, ShowInterface show) { + ownerDao.updateShow(owner, show); + } + + @Override + public DeedEntity getDeed(String id) { + return deedDao.getDeed(id); + } + + @Override + public DeedEntity takeOwnership(OwnerEntity owner, HostInterface host) { + if (!hostDao.isNimbyHost(host)) { + throw new SpcueRuntimeException("Cannot setup deeeds on non-NIMBY hosts."); + } + + deedDao.deleteDeed(host); + return deedDao.insertDeed(owner, host); + } + + @Override + public void 
removeDeed(HostInterface host) { + deedDao.deleteDeed(host); + } + + @Override + public void removeDeed(DeedEntity deed) { + deedDao.deleteDeed(deed); + } + + @Override + public boolean isOwner(OwnerEntity owner, HostInterface host) { + return ownerDao.isOwner(owner, host); + } + + public OwnerDao getOwnerDao() { + return ownerDao; + } + + public void setOwnerDao(OwnerDao ownerDao) { + this.ownerDao = ownerDao; + } + + public DeedDao getDeedDao() { + return deedDao; + } + + public void setDeedDao(DeedDao deedDao) { + this.deedDao = deedDao; + } + + public HostDao getHostDao() { + return hostDao; + } + + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java b/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java index a58341bf0..a145332b3 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java @@ -34,95 +34,95 @@ @Transactional(isolation = Isolation.SERIALIZABLE, propagation = Propagation.REQUIRES_NEW) public class RedirectService { - private static final Logger logger = LogManager.getLogger(RedirectService.class); - - @Resource - private PlatformTransactionManager txManager; - - private RedirectDao redirectDao; - - public RedirectService(RedirectDao redirectDao) { - this.redirectDao = redirectDao; - } - - /** - * Check for redirect existence. - * - * @param key Redirect key - * - * @return True if redirect exists - */ - @Transactional(readOnly = true) - public boolean containsKey(String key) { - return redirectDao.containsKey(key); - } - - /** - * Count redirects in a group. - * - * @param groupId the group to query - * - * @return count of redirects in group - */ - @Transactional(readOnly = true) - public int countRedirectsWithGroup(String groupId) { - return redirectDao.countRedirectsWithGroup(groupId); - } - - /** - * Delete all redirects that are past expiration age. - * - * @return count of redirects deleted - */ - public int deleteExpired() { - return redirectDao.deleteExpired(); - } - - /** - * Add redirect. - * - * @param key Redirect key - * - * @param r Redirect to add - */ - @Transactional(propagation = Propagation.NOT_SUPPORTED) - public void put(String key, Redirect r) { - DefaultTransactionDefinition def = new DefaultTransactionDefinition(); - def.setPropagationBehavior(DefaultTransactionDefinition.PROPAGATION_REQUIRES_NEW); - def.setIsolationLevel(DefaultTransactionDefinition.ISOLATION_SERIALIZABLE); - - while (true) { - TransactionStatus status = txManager.getTransaction(def); - try { - redirectDao.put(key, r); - } catch (CannotSerializeTransactionException e) { - // MERGE statement race lost; try again. - txManager.rollback(status); - continue; - } catch (DuplicateKeyException e) { - if (e.getMessage() != null && e.getMessage().contains("C_REDIRECT_PK")) { - // MERGE statement race lost; try again. - txManager.rollback(status); - continue; + private static final Logger logger = LogManager.getLogger(RedirectService.class); + + @Resource + private PlatformTransactionManager txManager; + + private RedirectDao redirectDao; + + public RedirectService(RedirectDao redirectDao) { + this.redirectDao = redirectDao; + } + + /** + * Check for redirect existence. 
+ * + * @param key Redirect key + * + * @return True if redirect exists + */ + @Transactional(readOnly = true) + public boolean containsKey(String key) { + return redirectDao.containsKey(key); + } + + /** + * Count redirects in a group. + * + * @param groupId the group to query + * + * @return count of redirects in group + */ + @Transactional(readOnly = true) + public int countRedirectsWithGroup(String groupId) { + return redirectDao.countRedirectsWithGroup(groupId); + } + + /** + * Delete all redirects that are past expiration age. + * + * @return count of redirects deleted + */ + public int deleteExpired() { + return redirectDao.deleteExpired(); + } + + /** + * Add redirect. + * + * @param key Redirect key + * + * @param r Redirect to add + */ + @Transactional(propagation = Propagation.NOT_SUPPORTED) + public void put(String key, Redirect r) { + DefaultTransactionDefinition def = new DefaultTransactionDefinition(); + def.setPropagationBehavior(DefaultTransactionDefinition.PROPAGATION_REQUIRES_NEW); + def.setIsolationLevel(DefaultTransactionDefinition.ISOLATION_SERIALIZABLE); + + while (true) { + TransactionStatus status = txManager.getTransaction(def); + try { + redirectDao.put(key, r); + } catch (CannotSerializeTransactionException e) { + // MERGE statement race lost; try again. + txManager.rollback(status); + continue; + } catch (DuplicateKeyException e) { + if (e.getMessage() != null && e.getMessage().contains("C_REDIRECT_PK")) { + // MERGE statement race lost; try again. + txManager.rollback(status); + continue; + } + throw e; + } catch (Exception e) { + txManager.rollback(status); + throw e; + } + txManager.commit(status); + break; } - throw e; - } catch (Exception e) { - txManager.rollback(status); - throw e; - } - txManager.commit(status); - break; } - } - - /** - * Remove a redirect for a specific key. - * - * @param key - * - * @return The redirect that was removed, or null - */ - public Redirect remove(String key) { - return redirectDao.remove(key); - } + + /** + * Remove a redirect for a specific key. 
+ * + * @param key + * + * @return The redirect that was removed, or null + */ + public Redirect remove(String key) { + return redirectDao.remove(key); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java index 41d3a86bf..940349e05 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManager.java @@ -20,24 +20,24 @@ public interface ServiceManager { - public ServiceEntity getService(String id); + public ServiceEntity getService(String id); - public ServiceEntity getDefaultService(); + public ServiceEntity getDefaultService(); - public void createService(ServiceEntity s); + public void createService(ServiceEntity s); - void createService(ServiceOverrideEntity s); + void createService(ServiceOverrideEntity s); - void updateService(ServiceOverrideEntity s); + void updateService(ServiceOverrideEntity s); - void updateService(ServiceEntity s); + void updateService(ServiceEntity s); - void deleteService(ServiceOverrideEntity s); + void deleteService(ServiceOverrideEntity s); - void deleteService(ServiceEntity s); + void deleteService(ServiceEntity s); - ServiceOverrideEntity getServiceOverride(String id); + ServiceOverrideEntity getServiceOverride(String id); - ServiceEntity getService(String id, String show); + ServiceEntity getService(String id, String show); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java index 6ab8de774..29f8f0b6e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/ServiceManagerService.java @@ -29,73 +29,73 @@ @Transactional public class ServiceManagerService implements ServiceManager { - private ServiceDao serviceDao; - - private static final String DEFAULT_SERVICE = "default"; - - @Override - public void createService(ServiceEntity s) { - serviceDao.insert(s); - } - - @Override - public void createService(ServiceOverrideEntity s) { - serviceDao.insert(s); - } - - @Override - public void deleteService(ServiceEntity s) { - serviceDao.delete(s); - } - - @Override - public void deleteService(ServiceOverrideEntity s) { - serviceDao.delete(s); - } - - @Override - public void updateService(ServiceEntity s) { - serviceDao.update(s); - } - - @Override - public void updateService(ServiceOverrideEntity s) { - serviceDao.update(s); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ServiceEntity getService(String id, String show) { - try { - return serviceDao.getOverride(id, show); - } catch (EmptyResultDataAccessException e) { - return serviceDao.get(id); + private ServiceDao serviceDao; + + private static final String DEFAULT_SERVICE = "default"; + + @Override + public void createService(ServiceEntity s) { + serviceDao.insert(s); + } + + @Override + public void createService(ServiceOverrideEntity s) { + serviceDao.insert(s); + } + + @Override + public void deleteService(ServiceEntity s) { + serviceDao.delete(s); + } + + @Override + public void deleteService(ServiceOverrideEntity s) { + serviceDao.delete(s); + } + + @Override + public void updateService(ServiceEntity s) { + serviceDao.update(s); + } + + @Override + public void updateService(ServiceOverrideEntity s) { + serviceDao.update(s); + } + + @Override + 
@Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceEntity getService(String id, String show) { + try { + return serviceDao.getOverride(id, show); + } catch (EmptyResultDataAccessException e) { + return serviceDao.get(id); + } + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceOverrideEntity getServiceOverride(String id) { + return serviceDao.getOverride(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceEntity getService(String id) { + return serviceDao.get(id); + } + + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly = true) + public ServiceEntity getDefaultService() { + return serviceDao.get(DEFAULT_SERVICE); + } + + public ServiceDao getServiceDao() { + return serviceDao; + } + + public void setServiceDao(ServiceDao serviceDao) { + this.serviceDao = serviceDao; } - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ServiceOverrideEntity getServiceOverride(String id) { - return serviceDao.getOverride(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ServiceEntity getService(String id) { - return serviceDao.get(id); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly = true) - public ServiceEntity getDefaultService() { - return serviceDao.get(DEFAULT_SERVICE); - } - - public ServiceDao getServiceDao() { - return serviceDao; - } - - public void setServiceDao(ServiceDao serviceDao) { - this.serviceDao = serviceDao; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java b/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java index 014b84cc5..85031cc46 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/Whiteboard.java @@ -24,12 +24,12 @@ */ public interface Whiteboard extends WhiteboardDao, NestedWhiteboardDao { - /** - * Returns true if the job is pending. - * - * @param name - * @return - */ - boolean isJobPending(String name); + /** + * Returns true if the job is pending. 
+ * + * @param name + * @return + */ + boolean isJobPending(String name); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java b/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java index 3e7a3ea03..a5e20e017 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java @@ -105,411 +105,411 @@ @Transactional(readOnly = true, propagation = Propagation.REQUIRED) public class WhiteboardService implements Whiteboard { - @SuppressWarnings("unused") - private static final Logger logger = LogManager.getLogger(WhiteboardService.class); + @SuppressWarnings("unused") + private static final Logger logger = LogManager.getLogger(WhiteboardService.class); - private WhiteboardDao whiteboardDao; + private WhiteboardDao whiteboardDao; - private NestedWhiteboardDao nestedWhiteboardDao; + private NestedWhiteboardDao nestedWhiteboardDao; - private JobDao jobDao; + private JobDao jobDao; - public JobDao getJobDao() { - return jobDao; - } + public JobDao getJobDao() { + return jobDao; + } - public void setJobDao(JobDao jobDao) { - this.jobDao = jobDao; - } + public void setJobDao(JobDao jobDao) { + this.jobDao = jobDao; + } - public boolean isJobPending(String name) { - return jobDao.exists(name); - } + public boolean isJobPending(String name) { + return jobDao.exists(name); + } - public FilterSeq getFilters(ShowInterface show) { - return whiteboardDao.getFilters(show); - } + public FilterSeq getFilters(ShowInterface show) { + return whiteboardDao.getFilters(show); + } - public LayerSeq getLayers(JobInterface job) { - return whiteboardDao.getLayers(job); - } + public LayerSeq getLayers(JobInterface job) { + return whiteboardDao.getLayers(job); + } - public List getJobNames(JobSearchInterface r) { - return whiteboardDao.getJobNames(r); - } + public List getJobNames(JobSearchInterface r) { + return whiteboardDao.getJobNames(r); + } - public Job findJob(String name) { - return whiteboardDao.findJob(name); - } + public Job findJob(String name) { + return whiteboardDao.findJob(name); + } - public Job getJob(String id) { - return whiteboardDao.getJob(id); - } + public Job getJob(String id) { + return whiteboardDao.getJob(id); + } - public FrameSeq getFrames(FrameSearchInterface r) { - return this.whiteboardDao.getFrames(r); - } + public FrameSeq getFrames(FrameSearchInterface r) { + return this.whiteboardDao.getFrames(r); + } - public NestedHostSeq getHostWhiteboard() { - return nestedWhiteboardDao.getHostWhiteboard(); - } + public NestedHostSeq getHostWhiteboard() { + return nestedWhiteboardDao.getHostWhiteboard(); + } - public Show findShow(String name) { - return whiteboardDao.findShow(name); - } + public Show findShow(String name) { + return whiteboardDao.findShow(name); + } - public Show getShow(String id) { - return whiteboardDao.getShow(id); - } + public Show getShow(String id) { + return whiteboardDao.getShow(id); + } - public ShowSeq getShows() { - return whiteboardDao.getShows(); - } + public ShowSeq getShows() { + return whiteboardDao.getShows(); + } - public Subscription getSubscription(String id) { - return this.whiteboardDao.getSubscription(id); - } + public Subscription getSubscription(String id) { + return this.whiteboardDao.getSubscription(id); + } - public SubscriptionSeq getSubscriptions(ShowInterface show) { - return this.whiteboardDao.getSubscriptions(show); - } + public SubscriptionSeq getSubscriptions(ShowInterface show) { + return 
this.whiteboardDao.getSubscriptions(show); + } - public Allocation findAllocation(String name) { - return this.whiteboardDao.findAllocation(name); - } + public Allocation findAllocation(String name) { + return this.whiteboardDao.findAllocation(name); + } - public Allocation getAllocation(String id) { - return this.whiteboardDao.getAllocation(id); - } + public Allocation getAllocation(String id) { + return this.whiteboardDao.getAllocation(id); + } - public AllocationSeq getAllocations() { - return this.whiteboardDao.getAllocations(); - } + public AllocationSeq getAllocations() { + return this.whiteboardDao.getAllocations(); + } - public GroupSeq getGroups(ShowInterface show) { - return this.whiteboardDao.getGroups(show); - } + public GroupSeq getGroups(ShowInterface show) { + return this.whiteboardDao.getGroups(show); + } - public GroupSeq getGroups(GroupInterface group) { - return this.whiteboardDao.getGroups(group); - } + public GroupSeq getGroups(GroupInterface group) { + return this.whiteboardDao.getGroups(group); + } - public Group getGroup(String id) { - return this.whiteboardDao.getGroup(id); - } + public Group getGroup(String id) { + return this.whiteboardDao.getGroup(id); + } - public WhiteboardDao getWhiteboardDao() { - return whiteboardDao; - } + public WhiteboardDao getWhiteboardDao() { + return whiteboardDao; + } - public void setWhiteboardDao(WhiteboardDao whiteboardDao) { - this.whiteboardDao = whiteboardDao; - } + public void setWhiteboardDao(WhiteboardDao whiteboardDao) { + this.whiteboardDao = whiteboardDao; + } - public Action getAction(ActionInterface action) { - return whiteboardDao.getAction(action); - } + public Action getAction(ActionInterface action) { + return whiteboardDao.getAction(action); + } - public ActionSeq getActions(FilterInterface filter) { - return whiteboardDao.getActions(filter); - } + public ActionSeq getActions(FilterInterface filter) { + return whiteboardDao.getActions(filter); + } - public Matcher getMatcher(MatcherInterface matcher) { - return whiteboardDao.getMatcher(matcher); - } + public Matcher getMatcher(MatcherInterface matcher) { + return whiteboardDao.getMatcher(matcher); + } - public MatcherSeq getMatchers(FilterInterface filter) { - return whiteboardDao.getMatchers(filter); - } + public MatcherSeq getMatchers(FilterInterface filter) { + return whiteboardDao.getMatchers(filter); + } - public Filter getFilter(FilterInterface filter) { - return whiteboardDao.getFilter(filter); - } + public Filter getFilter(FilterInterface filter) { + return whiteboardDao.getFilter(filter); + } - public Filter findFilter(ShowInterface show, String name) { - return whiteboardDao.findFilter(show, name); - } + public Filter findFilter(ShowInterface show, String name) { + return whiteboardDao.findFilter(show, name); + } - public Group getRootGroup(ShowInterface show) { - return whiteboardDao.getRootGroup(show); - } + public Group getRootGroup(ShowInterface show) { + return whiteboardDao.getRootGroup(show); + } - public NestedGroup getJobWhiteboard(ShowInterface show) { - return nestedWhiteboardDao.getJobWhiteboard(show); - } + public NestedGroup getJobWhiteboard(ShowInterface show) { + return nestedWhiteboardDao.getJobWhiteboard(show); + } - public JobSeq getJobs(GroupInterface group) { - return whiteboardDao.getJobs(group); - } + public JobSeq getJobs(GroupInterface group) { + return whiteboardDao.getJobs(group); + } - public NestedWhiteboardDao getNestedWhiteboardDao() { - return nestedWhiteboardDao; - } + public NestedWhiteboardDao getNestedWhiteboardDao() { 
+ return nestedWhiteboardDao; + } - public void setNestedWhiteboardDao(NestedWhiteboardDao nestedWhiteboardDao) { - this.nestedWhiteboardDao = nestedWhiteboardDao; - } + public void setNestedWhiteboardDao(NestedWhiteboardDao nestedWhiteboardDao) { + this.nestedWhiteboardDao = nestedWhiteboardDao; + } - public Depend getDepend(DependInterface depend) { - return whiteboardDao.getDepend(depend); - } + public Depend getDepend(DependInterface depend) { + return whiteboardDao.getDepend(depend); + } - public DependSeq getWhatDependsOnThis(JobInterface job) { - - return whiteboardDao.getWhatDependsOnThis(job); - } - - public DependSeq getWhatDependsOnThis(LayerInterface layer) { - return whiteboardDao.getWhatDependsOnThis(layer); - } - - public DependSeq getWhatDependsOnThis(FrameInterface frame) { - return whiteboardDao.getWhatDependsOnThis(frame); - } - - public DependSeq getWhatThisDependsOn(JobInterface job) { - return whiteboardDao.getWhatThisDependsOn(job); - } - - public DependSeq getWhatThisDependsOn(LayerInterface layer) { - return whiteboardDao.getWhatThisDependsOn(layer); - } - - public DependSeq getWhatThisDependsOn(FrameInterface frame) { - return whiteboardDao.getWhatThisDependsOn(frame); - } - - public DependSeq getDepends(JobInterface job) { - return whiteboardDao.getDepends(job); - } - - public Frame findFrame(String job, String layer, int frame) { - return whiteboardDao.findFrame(job, layer, frame); - } - - public Layer findLayer(String job, String layer) { - return whiteboardDao.findLayer(job, layer); - } - - public Host findHost(String name) { - return whiteboardDao.findHost(name); - } - - public Depend getDepend(String id) { - return whiteboardDao.getDepend(id); - } - - public Group findGroup(String show, String group) { - return whiteboardDao.findGroup(show, group); - } - - public Filter findFilter(String show, String name) { - return whiteboardDao.findFilter(show, name); - } - - public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, - int epochTime) { - return whiteboardDao.getUpdatedFrames(job, layers, epochTime); - } - - public CommentSeq getComments(JobInterface j) { - return whiteboardDao.getComments(j); - } - - public CommentSeq getComments(HostInterface h) { - return whiteboardDao.getComments(h); - } - - public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { - return whiteboardDao.getSubscriptions(alloc); - } - - public Subscription findSubscription(String show, String alloc) { - return whiteboardDao.findSubscription(show, alloc); - } - - @Override - public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { - return whiteboardDao.getTask(show, dept, shot); - } - - @Override - public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { - return whiteboardDao.getTasks(show, dept); - } - - @Override - public List getDepartmentNames() { - return whiteboardDao.getDepartmentNames(); - } - - @Override - public Department getDepartment(ShowInterface show, String name) { - return whiteboardDao.getDepartment(show, name); - } - - @Override - public DepartmentSeq getDepartments(ShowInterface show) { - return whiteboardDao.getDepartments(show); - } - - @Override - public JobSeq getJobs(JobSearchInterface r) { - return whiteboardDao.getJobs(r); - } - - @Override - public Frame getFrame(String id) { - return whiteboardDao.getFrame(id); - } - - @Override - public Host getHost(String id) { - return whiteboardDao.getHost(id); - } - - @Override - public Layer getLayer(String id) { - return 
whiteboardDao.getLayer(id); - } - - @Override - public HostSeq getHosts(HostSearchInterface r) { - return whiteboardDao.getHosts(r); - } - - @Override - public ProcSeq getProcs(HostInterface h) { - return whiteboardDao.getProcs(h); - } - - @Override - public ProcSeq getProcs(ProcSearchInterface p) { - return whiteboardDao.getProcs(p); - } - - @Override - public Depend getDepend(AbstractDepend depend) { - return whiteboardDao.getDepend(depend); - } - - @Override - public Host getHost(DeedEntity deed) { - return whiteboardDao.getHost(deed); - } - - @Override - public Owner getOwner(DeedEntity deed) { - return whiteboardDao.getOwner(deed); - } - - @Override - public DeedSeq getDeeds(OwnerEntity owner) { - return whiteboardDao.getDeeds(owner); - } - - @Override - public DeedSeq getDeeds(ShowInterface show) { - return whiteboardDao.getDeeds(show); - } - - @Override - public HostSeq getHosts(OwnerEntity owner) { - return whiteboardDao.getHosts(owner); - } - - @Override - public Owner getOwner(HostInterface host) { - return whiteboardDao.getOwner(host); - } - - @Override - public List getOwners(ShowInterface show) { - return whiteboardDao.getOwners(show); - } - - @Override - public Owner getOwner(String name) { - return whiteboardDao.getOwner(name); - } - - @Override - public Deed getDeed(HostInterface host) { - return whiteboardDao.getDeed(host); - } - - @Override - public RenderPartition getRenderPartition(LocalHostAssignment l) { - return whiteboardDao.getRenderPartition(l); - } - - @Override - public RenderPartitionSeq getRenderPartitions(HostInterface host) { - return whiteboardDao.getRenderPartitions(host); - } - - @Override - public FacilitySeq getFacilities() { - return whiteboardDao.getFacilities(); - } - - @Override - public Facility getFacility(String name) { - return whiteboardDao.getFacility(name); - } - - @Override - public AllocationSeq getAllocations(com.imageworks.spcue.FacilityInterface facility) { - return whiteboardDao.getAllocations(facility); - } - - @Override - public ShowSeq getActiveShows() { - return whiteboardDao.getActiveShows(); - } - - @Override - public Service getService(String id) { - return whiteboardDao.getService(id); - } - - @Override - public ServiceSeq getDefaultServices() { - return whiteboardDao.getDefaultServices(); - } - - @Override - public Service findService(String name) { - return whiteboardDao.findService(name); - } - - @Override - public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { - return whiteboardDao.getServiceOverrides(show); - } - - @Override - public ServiceOverride getServiceOverride(ShowInterface show, String name) { - return whiteboardDao.getServiceOverride(show, name); - } - - @Override - public Limit findLimit(String name) { - return whiteboardDao.findLimit(name); - } - - @Override - public Limit getLimit(String id) { - return whiteboardDao.getLimit(id); - } - - @Override - public List getLimits() { - return whiteboardDao.getLimits(); - } - - @Override - public List getLimits(LayerInterface layer) { - return whiteboardDao.getLimits(layer); - } + public DependSeq getWhatDependsOnThis(JobInterface job) { + + return whiteboardDao.getWhatDependsOnThis(job); + } + + public DependSeq getWhatDependsOnThis(LayerInterface layer) { + return whiteboardDao.getWhatDependsOnThis(layer); + } + + public DependSeq getWhatDependsOnThis(FrameInterface frame) { + return whiteboardDao.getWhatDependsOnThis(frame); + } + + public DependSeq getWhatThisDependsOn(JobInterface job) { + return whiteboardDao.getWhatThisDependsOn(job); + } + + public 
DependSeq getWhatThisDependsOn(LayerInterface layer) { + return whiteboardDao.getWhatThisDependsOn(layer); + } + + public DependSeq getWhatThisDependsOn(FrameInterface frame) { + return whiteboardDao.getWhatThisDependsOn(frame); + } + + public DependSeq getDepends(JobInterface job) { + return whiteboardDao.getDepends(job); + } + + public Frame findFrame(String job, String layer, int frame) { + return whiteboardDao.findFrame(job, layer, frame); + } + + public Layer findLayer(String job, String layer) { + return whiteboardDao.findLayer(job, layer); + } + + public Host findHost(String name) { + return whiteboardDao.findHost(name); + } + + public Depend getDepend(String id) { + return whiteboardDao.getDepend(id); + } + + public Group findGroup(String show, String group) { + return whiteboardDao.findGroup(show, group); + } + + public Filter findFilter(String show, String name) { + return whiteboardDao.findFilter(show, name); + } + + public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, + int epochTime) { + return whiteboardDao.getUpdatedFrames(job, layers, epochTime); + } + + public CommentSeq getComments(JobInterface j) { + return whiteboardDao.getComments(j); + } + + public CommentSeq getComments(HostInterface h) { + return whiteboardDao.getComments(h); + } + + public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { + return whiteboardDao.getSubscriptions(alloc); + } + + public Subscription findSubscription(String show, String alloc) { + return whiteboardDao.findSubscription(show, alloc); + } + + @Override + public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { + return whiteboardDao.getTask(show, dept, shot); + } + + @Override + public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { + return whiteboardDao.getTasks(show, dept); + } + + @Override + public List getDepartmentNames() { + return whiteboardDao.getDepartmentNames(); + } + + @Override + public Department getDepartment(ShowInterface show, String name) { + return whiteboardDao.getDepartment(show, name); + } + + @Override + public DepartmentSeq getDepartments(ShowInterface show) { + return whiteboardDao.getDepartments(show); + } + + @Override + public JobSeq getJobs(JobSearchInterface r) { + return whiteboardDao.getJobs(r); + } + + @Override + public Frame getFrame(String id) { + return whiteboardDao.getFrame(id); + } + + @Override + public Host getHost(String id) { + return whiteboardDao.getHost(id); + } + + @Override + public Layer getLayer(String id) { + return whiteboardDao.getLayer(id); + } + + @Override + public HostSeq getHosts(HostSearchInterface r) { + return whiteboardDao.getHosts(r); + } + + @Override + public ProcSeq getProcs(HostInterface h) { + return whiteboardDao.getProcs(h); + } + + @Override + public ProcSeq getProcs(ProcSearchInterface p) { + return whiteboardDao.getProcs(p); + } + + @Override + public Depend getDepend(AbstractDepend depend) { + return whiteboardDao.getDepend(depend); + } + + @Override + public Host getHost(DeedEntity deed) { + return whiteboardDao.getHost(deed); + } + + @Override + public Owner getOwner(DeedEntity deed) { + return whiteboardDao.getOwner(deed); + } + + @Override + public DeedSeq getDeeds(OwnerEntity owner) { + return whiteboardDao.getDeeds(owner); + } + + @Override + public DeedSeq getDeeds(ShowInterface show) { + return whiteboardDao.getDeeds(show); + } + + @Override + public HostSeq getHosts(OwnerEntity owner) { + return whiteboardDao.getHosts(owner); + } + + @Override + public Owner 
getOwner(HostInterface host) { + return whiteboardDao.getOwner(host); + } + + @Override + public List getOwners(ShowInterface show) { + return whiteboardDao.getOwners(show); + } + + @Override + public Owner getOwner(String name) { + return whiteboardDao.getOwner(name); + } + + @Override + public Deed getDeed(HostInterface host) { + return whiteboardDao.getDeed(host); + } + + @Override + public RenderPartition getRenderPartition(LocalHostAssignment l) { + return whiteboardDao.getRenderPartition(l); + } + + @Override + public RenderPartitionSeq getRenderPartitions(HostInterface host) { + return whiteboardDao.getRenderPartitions(host); + } + + @Override + public FacilitySeq getFacilities() { + return whiteboardDao.getFacilities(); + } + + @Override + public Facility getFacility(String name) { + return whiteboardDao.getFacility(name); + } + + @Override + public AllocationSeq getAllocations(com.imageworks.spcue.FacilityInterface facility) { + return whiteboardDao.getAllocations(facility); + } + + @Override + public ShowSeq getActiveShows() { + return whiteboardDao.getActiveShows(); + } + + @Override + public Service getService(String id) { + return whiteboardDao.getService(id); + } + + @Override + public ServiceSeq getDefaultServices() { + return whiteboardDao.getDefaultServices(); + } + + @Override + public Service findService(String name) { + return whiteboardDao.findService(name); + } + + @Override + public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { + return whiteboardDao.getServiceOverrides(show); + } + + @Override + public ServiceOverride getServiceOverride(ShowInterface show, String name) { + return whiteboardDao.getServiceOverride(show, name); + } + + @Override + public Limit findLimit(String name) { + return whiteboardDao.findLimit(name); + } + + @Override + public Limit getLimit(String id) { + return whiteboardDao.getLimit(id); + } + + @Override + public List getLimits() { + return whiteboardDao.getLimits(); + } + + @Override + public List getLimits(LayerInterface layer) { + return whiteboardDao.getLimits(layer); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java b/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java index 0a3e4a245..d552f84a6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servlet/HealthCheckServlet.java @@ -39,101 +39,101 @@ @SuppressWarnings("serial") public class HealthCheckServlet extends FrameworkServlet { - private static final Logger logger = LogManager.getLogger(HealthCheckServlet.class); - private CueStatic cueStatic; - private Environment env; + private static final Logger logger = LogManager.getLogger(HealthCheckServlet.class); + private CueStatic cueStatic; + private Environment env; - private enum HealthStatus { - SERVER_ERROR, DISPATCH_QUEUE_UNHEALTHY, MANAGE_QUEUE_UNHEALTHY, REPORT_QUEUE_UNHEALTHY, BOOKING_QUEUE_UNHEALTHY, JOB_QUERY_ERROR - } + private enum HealthStatus { + SERVER_ERROR, DISPATCH_QUEUE_UNHEALTHY, MANAGE_QUEUE_UNHEALTHY, REPORT_QUEUE_UNHEALTHY, BOOKING_QUEUE_UNHEALTHY, JOB_QUERY_ERROR + } - @Override - public void initFrameworkServlet() throws ServletException { - this.cueStatic = (CueStatic) Objects.requireNonNull(this.getWebApplicationContext()) - .getBean("cueStaticServant"); - this.env = (Environment) Objects.requireNonNull(this.getWebApplicationContext()) - .getBean("environment"); - } + @Override + public void initFrameworkServlet() throws ServletException { + 
this.cueStatic = (CueStatic) Objects.requireNonNull(this.getWebApplicationContext()) + .getBean("cueStaticServant"); + this.env = (Environment) Objects.requireNonNull(this.getWebApplicationContext()) + .getBean("environment"); + } - private ArrayList getHealthStatus() { - ArrayList statusList = new ArrayList(); + private ArrayList getHealthStatus() { + ArrayList statusList = new ArrayList(); - if (this.cueStatic == null) { - statusList.add(HealthStatus.SERVER_ERROR); - } else { - // Check queue capacity - if (!this.cueStatic.isDispatchQueueHealthy()) { - statusList.add(HealthStatus.DISPATCH_QUEUE_UNHEALTHY); - } - if (!this.cueStatic.isManageQueueHealthy()) { - statusList.add(HealthStatus.MANAGE_QUEUE_UNHEALTHY); - } - if (!this.cueStatic.isReportQueueHealthy()) { - statusList.add(HealthStatus.REPORT_QUEUE_UNHEALTHY); - } - if (!this.cueStatic.isBookingQueueHealthy()) { - statusList.add(HealthStatus.BOOKING_QUEUE_UNHEALTHY); - } - // Run get jobs, if it crashes, set error, if it takes longer than expected, - // the caller (HEALTHCHECK) will timeout - try { - getJobs(); - } catch (RuntimeException re) { - Sentry.captureException(re); - statusList.add(HealthStatus.JOB_QUERY_ERROR); - } + if (this.cueStatic == null) { + statusList.add(HealthStatus.SERVER_ERROR); + } else { + // Check queue capacity + if (!this.cueStatic.isDispatchQueueHealthy()) { + statusList.add(HealthStatus.DISPATCH_QUEUE_UNHEALTHY); + } + if (!this.cueStatic.isManageQueueHealthy()) { + statusList.add(HealthStatus.MANAGE_QUEUE_UNHEALTHY); + } + if (!this.cueStatic.isReportQueueHealthy()) { + statusList.add(HealthStatus.REPORT_QUEUE_UNHEALTHY); + } + if (!this.cueStatic.isBookingQueueHealthy()) { + statusList.add(HealthStatus.BOOKING_QUEUE_UNHEALTHY); + } + // Run get jobs, if it crashes, set error, if it takes longer than expected, + // the caller (HEALTHCHECK) will timeout + try { + getJobs(); + } catch (RuntimeException re) { + Sentry.captureException(re); + statusList.add(HealthStatus.JOB_QUERY_ERROR); + } + } + return statusList; } - return statusList; - } - private void getJobs() { - if (this.cueStatic != null && this.env != null) { - // Defaults to testing show, which is added as part of the seeding data script - String defaultShow = - env.getProperty("protected_shows", String.class, "testing").split(",")[0]; - ShowEntity s = new ShowEntity(); - s.name = defaultShow; - JobSearchInterface js = new JobSearch(); - js.filterByShow(s); + private void getJobs() { + if (this.cueStatic != null && this.env != null) { + // Defaults to testing show, which is added as part of the seeding data script + String defaultShow = + env.getProperty("protected_shows", String.class, "testing").split(",")[0]; + ShowEntity s = new ShowEntity(); + s.name = defaultShow; + JobSearchInterface js = new JobSearch(); + js.filterByShow(s); - // GetJobs will throw an exception if there's a problem getting - // data from the database - JobSeq jobs = this.cueStatic.getWhiteboard().getJobs(js); + // GetJobs will throw an exception if there's a problem getting + // data from the database + JobSeq jobs = this.cueStatic.getWhiteboard().getJobs(js); + } } - } - @Override - protected void doService(HttpServletRequest request, HttpServletResponse response) - throws Exception { - logger.info("HealthCheckServlet: Received request"); - try { - ArrayList statusList = getHealthStatus(); - if (!statusList.isEmpty()) { - response.setStatus(500); - StringBuilder out = new StringBuilder("FAILED: "); - for (HealthStatus status : statusList) { - 
out.append(status.name()); - out.append(" "); - } - Sentry.captureMessage("Healthcheck failure: " + out); + @Override + protected void doService(HttpServletRequest request, HttpServletResponse response) + throws Exception { + logger.info("HealthCheckServlet: Received request"); + try { + ArrayList statusList = getHealthStatus(); + if (!statusList.isEmpty()) { + response.setStatus(500); + StringBuilder out = new StringBuilder("FAILED: "); + for (HealthStatus status : statusList) { + out.append(status.name()); + out.append(" "); + } + Sentry.captureMessage("Healthcheck failure: " + out); - sendResponse(response, out.toString()); - } else { - sendResponse(response, "SUCCESS"); - } - } catch (Exception e) { - logger.error("Unexpected error", e); - response.setStatus(500); - sendResponse(response, "FAILED " + e.getMessage()); + sendResponse(response, out.toString()); + } else { + sendResponse(response, "SUCCESS"); + } + } catch (Exception e) { + logger.error("Unexpected error", e); + response.setStatus(500); + sendResponse(response, "FAILED " + e.getMessage()); + } } - } - private void sendResponse(HttpServletResponse response, String message) { - response.setContentLength(message.length()); - try { - response.getOutputStream().println(message); - } catch (IOException e) { - // failed to send response, just eat it. + private void sendResponse(HttpServletResponse response, String message) { + response.setContentLength(message.length()); + try { + response.getOutputStream().println(message); + } catch (IOException e) { + // failed to send response, just eat it. + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java b/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java index 5df479fd7..eb61c0e10 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java @@ -35,42 +35,42 @@ @SuppressWarnings("serial") public class JobLaunchServlet extends FrameworkServlet { - private static final Logger logger = LogManager.getLogger(JobLaunchServlet.class); + private static final Logger logger = LogManager.getLogger(JobLaunchServlet.class); - private JobLauncher jobLauncher; + private JobLauncher jobLauncher; - @Override - public void initFrameworkServlet() throws ServletException { - jobLauncher = (JobLauncher) Objects.requireNonNull(this.getWebApplicationContext()) - .getBean("jobLauncher"); - } + @Override + public void initFrameworkServlet() throws ServletException { + jobLauncher = (JobLauncher) Objects.requireNonNull(this.getWebApplicationContext()) + .getBean("jobLauncher"); + } - @Override - protected void doService(HttpServletRequest request, HttpServletResponse response) - throws Exception { + @Override + protected void doService(HttpServletRequest request, HttpServletResponse response) + throws Exception { - try { - JobSpec spec = jobLauncher.parse(request.getParameter("payload")); - jobLauncher.queueAndLaunch(spec); + try { + JobSpec spec = jobLauncher.parse(request.getParameter("payload")); + jobLauncher.queueAndLaunch(spec); - StringBuilder sb = new StringBuilder(4096); - for (BuildableJob job : spec.getJobs()) { - sb.append(job.detail.name); - sb.append(","); - } - sendResponse(response, "SUCCESS " + sb.toString()); - } catch (Exception e) { - logger.debug("Misc error", e); - sendResponse(response, "FAILED " + e.getMessage()); + StringBuilder sb = new StringBuilder(4096); + for (BuildableJob job : spec.getJobs()) { + 
sb.append(job.detail.name); + sb.append(","); + } + sendResponse(response, "SUCCESS " + sb.toString()); + } catch (Exception e) { + logger.debug("Misc error", e); + sendResponse(response, "FAILED " + e.getMessage()); + } } - } - private void sendResponse(HttpServletResponse response, String message) { - response.setContentLength(message.length()); - try { - response.getOutputStream().println(message); - } catch (IOException e) { - // failed to send response, just eat it. + private void sendResponse(HttpServletResponse response, String message) { + response.setContentLength(message.length()); + try { + response.getOutputStream().println(message); + } catch (IOException e) { + // failed to send response, just eat it. + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java b/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java index 89a5c72c8..fcee21631 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/Convert.java @@ -25,45 +25,45 @@ */ public final class Convert { - public static final int coresToCoreUnits(float cores) { - return new BigDecimal(cores * 100).setScale(2, RoundingMode.HALF_UP).intValue(); - } + public static final int coresToCoreUnits(float cores) { + return new BigDecimal(cores * 100).setScale(2, RoundingMode.HALF_UP).intValue(); + } - public static final int coresToCoreUnits(int cores) { - return cores * 100; - } + public static final int coresToCoreUnits(int cores) { + return cores * 100; + } - public static final int coresToWholeCoreUnits(float cores) { - if (cores == -1) { - return -1; + public static final int coresToWholeCoreUnits(float cores) { + if (cores == -1) { + return -1; + } + return (int) (((cores * 100.0f) + 0.5f) / 100) * 100; } - return (int) (((cores * 100.0f) + 0.5f) / 100) * 100; - } - public static final float coreUnitsToCores(int coreUnits) { - if (coreUnits == -1) { - return -1f; + public static final float coreUnitsToCores(int coreUnits) { + if (coreUnits == -1) { + return -1f; + } + return Float.valueOf(String.format(Locale.ROOT, "%6.2f", coreUnits / 100.0f)); } - return Float.valueOf(String.format(Locale.ROOT, "%6.2f", coreUnits / 100.0f)); - } - public static final float coreUnitsToWholeCores(int coreUnits) { - if (coreUnits == -1) { - return -1f; + public static final float coreUnitsToWholeCores(int coreUnits) { + if (coreUnits == -1) { + return -1f; + } + return Float.valueOf((int) ((coreUnits / 100.0f) + 0.5)); } - return Float.valueOf((int) ((coreUnits / 100.0f) + 0.5)); - } - private static final List MATCH_BOOL = - java.util.Arrays.asList(new String[] {"true", "yes", "1", "on"}); + private static final List MATCH_BOOL = + java.util.Arrays.asList(new String[] {"true", "yes", "1", "on"}); - public static final boolean stringToBool(String value) { - if (value == null) { - return false; - } - if (MATCH_BOOL.contains(value.toLowerCase())) { - return true; + public static final boolean stringToBool(String value) { + if (value == null) { + return false; + } + if (MATCH_BOOL.contains(value.toLowerCase())) { + return true; + } + return false; } - return false; - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java index 97f20c061..bf02b756c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java @@ -28,32 +28,32 @@ */ public class 
CueExceptionUtil { - /** - * returns the stack track for an exception as a string. - * - * @param aThrowable - * @return String - */ - public static String getStackTrace(Throwable aThrowable) { - final Writer result = new StringWriter(); - final PrintWriter printWriter = new PrintWriter(result); - aThrowable.printStackTrace(printWriter); - return result.toString(); - } + /** + * returns the stack track for an exception as a string. + * + * @param aThrowable + * @return String + */ + public static String getStackTrace(Throwable aThrowable) { + final Writer result = new StringWriter(); + final PrintWriter printWriter = new PrintWriter(result); + aThrowable.printStackTrace(printWriter); + return result.toString(); + } - /** - * Creates an error message string which w/ a stack track and returns it. - * - * @param msg - * @param aThrowable - * @return String - */ - public static void logStackTrace(String msg, Throwable aThrowable) { - Logger error_logger = LogManager.getLogger(CueExceptionUtil.class); - error_logger.info("Caught unexpected exception caused by: " + aThrowable); - error_logger.info("StackTrace: \n" + getStackTrace(aThrowable)); - if (aThrowable.getCause() != null) { - error_logger.info("Caused By: " + getStackTrace(aThrowable.getCause())); + /** + * Creates an error message string which w/ a stack track and returns it. + * + * @param msg + * @param aThrowable + * @return String + */ + public static void logStackTrace(String msg, Throwable aThrowable) { + Logger error_logger = LogManager.getLogger(CueExceptionUtil.class); + error_logger.info("Caught unexpected exception caused by: " + aThrowable); + error_logger.info("StackTrace: \n" + getStackTrace(aThrowable)); + if (aThrowable.getCause() != null) { + error_logger.info("Caused By: " + getStackTrace(aThrowable.getCause())); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java index a94a96aec..4fc8e2948 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java @@ -59,325 +59,328 @@ @Component public final class CueUtil { - private static final Logger logger = LogManager.getLogger(CueUtil.class); - private static String smtpHost = ""; - @Autowired - private Environment env; - - /** - * Commonly used macros for gigabyte values in KB. - */ - public static final long MB128 = 131072; - public static final long MB256 = 262144; - public static final long MB512 = 524288; - public static final long GB = 1048576; - public static final long GB2 = 1048576L * 2; - public static final long GB4 = 1048576L * 4; - public static final long GB8 = 1048576L * 8; - public static final long GB16 = 1048576L * 16; - public static final long GB32 = 1048576L * 32; - - /** - * Features that relay on an integer greated than 0 to work properly are disabled by setting them - * to -1. - */ - public static final int FEATURE_DISABLED = -1; - - /** - * A const to repesent a single core - */ - public static final int ONE_CORE = 100; - - /** - * One hour of time in seconds. - */ - public static final int ONE_HOUR = 3600; - - @PostConstruct - public void init() { - CueUtil.smtpHost = this.env.getRequiredProperty("smtp_host", String.class); - } - - /** - * Return true if the given name is formatted as a valid allocation name. Allocation names should - * be facility.unique_name. 
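// Usage sketch (illustrative only, not part of this patch): getStackTrace renders a
// Throwable's stack trace as a String; logStackTrace logs the trace and, when present,
// the root cause at INFO level via Log4j.
static void exceptionUtilSketch() {
    try {
        throw new IllegalStateException("boom");
    } catch (IllegalStateException e) {
        String trace = CueExceptionUtil.getStackTrace(e);      // multi-line trace text
        CueExceptionUtil.logStackTrace("while doing work", e); // logged, cause included
    }
}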
- * - * @param name - * @return - */ - public static boolean verifyAllocationNameFormat(String name) { - return Pattern.matches("^(\\w+)\\.(\\w+)$", name); - } - - /** - * Split an allocation name and return its parts in a String array. The first element is the - * facility, the second is the allocation's unique name. - * - * @param name - * @return - */ - public static String[] splitAllocationName(String name) { - String[] parts = name.split("\\.", 2); - if (parts.length != 2 || !verifyAllocationNameFormat(name)) { - throw new SpcueRuntimeException( - "Allocation names must be in the form of facility.alloc. The name " + name - + " is not valid."); + private static final Logger logger = LogManager.getLogger(CueUtil.class); + private static String smtpHost = ""; + @Autowired + private Environment env; + + /** + * Commonly used macros for gigabyte values in KB. + */ + public static final long MB128 = 131072; + public static final long MB256 = 262144; + public static final long MB512 = 524288; + public static final long GB = 1048576; + public static final long GB2 = 1048576L * 2; + public static final long GB4 = 1048576L * 4; + public static final long GB8 = 1048576L * 8; + public static final long GB16 = 1048576L * 16; + public static final long GB32 = 1048576L * 32; + + /** + * Features that relay on an integer greated than 0 to work properly are disabled by setting + * them to -1. + */ + public static final int FEATURE_DISABLED = -1; + + /** + * A const to repesent a single core + */ + public static final int ONE_CORE = 100; + + /** + * One hour of time in seconds. + */ + public static final int ONE_HOUR = 3600; + + @PostConstruct + public void init() { + CueUtil.smtpHost = this.env.getRequiredProperty("smtp_host", String.class); + } + + /** + * Return true if the given name is formatted as a valid allocation name. Allocation names + * should be facility.unique_name. + * + * @param name + * @return + */ + public static boolean verifyAllocationNameFormat(String name) { + return Pattern.matches("^(\\w+)\\.(\\w+)$", name); } - return parts; - } - - /** - * Finds the chunk that the dependErFrame belongs to in the given sequence of frames. - * - * @param dependOnFrames - the full frame range to depend on - * @param dependErFrame - the dependent frame number. - * @return - */ - public static int findChunk(List dependOnFrames, int dependErFrame) { - int dependOnFrame = -1; - if (dependOnFrames.contains(dependErFrame)) { - dependOnFrame = dependErFrame; - } else { - int size = dependOnFrames.size(); - for (int i = 0; i < size; i++) { - dependOnFrame = dependOnFrames.get(i); - if (dependOnFrame > dependErFrame) { - dependOnFrame = dependOnFrames.get(i - 1); - break; + + /** + * Split an allocation name and return its parts in a String array. The first element is the + * facility, the second is the allocation's unique name. + * + * @param name + * @return + */ + public static String[] splitAllocationName(String name) { + String[] parts = name.split("\\.", 2); + if (parts.length != 2 || !verifyAllocationNameFormat(name)) { + throw new SpcueRuntimeException( + "Allocation names must be in the form of facility.alloc. The name " + name + + " is not valid."); } - } + return parts; } - if (dependOnFrame == -1) { - throw new RuntimeException("unable to find chunk for frame: " + dependErFrame - + " in the range: " + dependOnFrames.toString()); + /** + * Finds the chunk that the dependErFrame belongs to in the given sequence of frames. 
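// Usage sketch (illustrative only, not part of this patch): allocation names follow the
// "facility.unique_name" convention enforced above; the name used here is a placeholder.
static void allocationNameSketch() {
    boolean ok = CueUtil.verifyAllocationNameFormat("cloud.general"); // true
    String[] parts = CueUtil.splitAllocationName("cloud.general");    // {"cloud", "general"}
    // CueUtil.splitAllocationName("general") throws SpcueRuntimeException (no facility part).
}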
+ * + * @param dependOnFrames - the full frame range to depend on + * @param dependErFrame - the dependent frame number. + * @return + */ + public static int findChunk(List dependOnFrames, int dependErFrame) { + int dependOnFrame = -1; + if (dependOnFrames.contains(dependErFrame)) { + dependOnFrame = dependErFrame; + } else { + int size = dependOnFrames.size(); + for (int i = 0; i < size; i++) { + dependOnFrame = dependOnFrames.get(i); + if (dependOnFrame > dependErFrame) { + dependOnFrame = dependOnFrames.get(i - 1); + break; + } + } + } + + if (dependOnFrame == -1) { + throw new RuntimeException("unable to find chunk for frame: " + dependErFrame + + " in the range: " + dependOnFrames.toString()); + } + return dependOnFrame; } - return dependOnFrame; - } - - /** - * A simple send mail method - * - * @param to - * @param from - * @param subject - * @param body - * @param images - */ - public static void sendmail(String to, String from, String subject, StringBuilder body, - Map images, File attachment) { - try { - Properties props = System.getProperties(); - props.put("mail.smtp.host", CueUtil.smtpHost); - Session session = Session.getDefaultInstance(props, null); - Message msg = new MimeMessage(session); - msg.setFrom(new InternetAddress(from)); - msg.setReplyTo(new InternetAddress[] {new InternetAddress(from)}); - msg.setRecipients(Message.RecipientType.TO, InternetAddress.parse(to, false)); - msg.setSubject(subject); - - MimeMultipart mimeMultipart = new MimeMultipart(); - mimeMultipart.setSubType("alternative"); - - BodyPart htmlBodyPart = new MimeBodyPart(); - htmlBodyPart.setContent(body.toString(), "text/html"); - mimeMultipart.addBodyPart(htmlBodyPart); - - for (Entry e : images.entrySet()) { - String name = e.getKey().replace('/', '_'); - - BodyPart imageBodyPart = new MimeBodyPart(); - DataSource ds = new ByteArrayDataSource(e.getValue(), "image/png"); - DataHandler dh = new DataHandler(ds); - imageBodyPart.setDataHandler(dh); - imageBodyPart.setFileName(name); - imageBodyPart.setDisposition("inline"); - imageBodyPart.setHeader("Content-ID", '<' + name + '>'); - mimeMultipart.addBodyPart(imageBodyPart); - } - if (attachment != null && attachment.length() != 0) { - MimeBodyPart attachmentPart = new MimeBodyPart(); - attachmentPart.attachFile(attachment); - mimeMultipart.addBodyPart(attachmentPart); - } - - msg.setContent(mimeMultipart); - msg.setHeader("X-Mailer", "OpenCueMailer"); - msg.setSentDate(new Date()); - Transport transport = session.getTransport("smtp"); - transport.connect(CueUtil.smtpHost, null, null); - Transport.send(msg); - } catch (Exception e) { - throw new RuntimeException("failed to send email: " + e); + + /** + * A simple send mail method + * + * @param to + * @param from + * @param subject + * @param body + * @param images + */ + public static void sendmail(String to, String from, String subject, StringBuilder body, + Map images, File attachment) { + try { + Properties props = System.getProperties(); + props.put("mail.smtp.host", CueUtil.smtpHost); + Session session = Session.getDefaultInstance(props, null); + Message msg = new MimeMessage(session); + msg.setFrom(new InternetAddress(from)); + msg.setReplyTo(new InternetAddress[] {new InternetAddress(from)}); + msg.setRecipients(Message.RecipientType.TO, InternetAddress.parse(to, false)); + msg.setSubject(subject); + + MimeMultipart mimeMultipart = new MimeMultipart(); + mimeMultipart.setSubType("alternative"); + + BodyPart htmlBodyPart = new MimeBodyPart(); + htmlBodyPart.setContent(body.toString(), 
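// Usage sketch (illustrative only, not part of this patch): findChunk resolves which
// depended-on frame a dependent frame should wait on, e.g. for a layer chunked on 5.
static void findChunkSketch() {
    java.util.List<Integer> dependOnFrames = java.util.Arrays.asList(1, 6, 11);
    int chunk = CueUtil.findChunk(dependOnFrames, 3);  // 1 -> frame 3 waits on frame 1
    int exact = CueUtil.findChunk(dependOnFrames, 6);  // 6 -> exact matches are returned as-is
}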
"text/html"); + mimeMultipart.addBodyPart(htmlBodyPart); + + for (Entry e : images.entrySet()) { + String name = e.getKey().replace('/', '_'); + + BodyPart imageBodyPart = new MimeBodyPart(); + DataSource ds = new ByteArrayDataSource(e.getValue(), "image/png"); + DataHandler dh = new DataHandler(ds); + imageBodyPart.setDataHandler(dh); + imageBodyPart.setFileName(name); + imageBodyPart.setDisposition("inline"); + imageBodyPart.setHeader("Content-ID", '<' + name + '>'); + mimeMultipart.addBodyPart(imageBodyPart); + } + if (attachment != null && attachment.length() != 0) { + MimeBodyPart attachmentPart = new MimeBodyPart(); + attachmentPart.attachFile(attachment); + mimeMultipart.addBodyPart(attachmentPart); + } + + msg.setContent(mimeMultipart); + msg.setHeader("X-Mailer", "OpenCueMailer"); + msg.setSentDate(new Date()); + Transport transport = session.getTransport("smtp"); + transport.connect(CueUtil.smtpHost, null, null); + Transport.send(msg); + } catch (Exception e) { + throw new RuntimeException("failed to send email: " + e); + } + } + + public static final String formatDuration(long seconds) { + return String.format("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60); } - } - - public static final String formatDuration(long seconds) { - return String.format("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60); - } - - public static final String formatDuration(int seconds) { - return String.format("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60); - } - - public static final String KbToMb(long kb) { - return String.format("%dMB", kb / 1024); - } - - public static final long convertKbToFakeKb64bit(Environment env, long Kb) { - long memReservedSystem = - env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); - return (long) (Math.ceil((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; - } - - public static final long convertKbToFakeKb32bit(Environment env, long Kb) { - long memReservedSystem = - env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); - return (long) (Math.floor((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; - } - - /** - * returns epoch time - * - * @return int - */ - public static int getTime() { - return (int) (System.currentTimeMillis() / 1000); - } - - /** - * returns a frame name from a layer and frame number. 
- * - * @param layer - * @param num - * @return String - */ - public final static String buildFrameName(LayerInterface layer, int num) { - return String.format("%04d-%s", num, layer.getName()); - } - - public final static String buildProcName(String host, int cores, int gpus) { - return String.format(Locale.ROOT, "%s/%4.2f/%d", host, Convert.coreUnitsToCores(cores), gpus); - } - - /** - * for logging how long an operation took - * - * @param time - * @param message - */ - public final static void logDuration(long time, String message) { - long duration = System.currentTimeMillis() - time; - logger.info("Operation: " + message + " took " + duration + "ms"); - } - - /** - * return the milliseconds since time - * - * @param time - */ - public final static long duration(long time) { - return System.currentTimeMillis() - time; - } - - public static final long getCpuUsage() { - ThreadMXBean mx = ManagementFactory.getThreadMXBean(); - mx.setThreadCpuTimeEnabled(true); - long result = 0; - for (long id : mx.getAllThreadIds()) { - result = result + mx.getThreadUserTime(id); + + public static final String formatDuration(int seconds) { + return String.format("%02d:%02d:%02d", seconds / 3600, (seconds % 3600) / 60, seconds % 60); } - return result; - } - private static final int DAY_START = 7; - private static final int DAY_END = 19; + public static final String KbToMb(long kb) { + return String.format("%dMB", kb / 1024); + } - public static boolean isDayTime() { - Calendar cal = Calendar.getInstance(); - int hour_of_day = cal.get(Calendar.HOUR_OF_DAY); - if (hour_of_day >= DAY_START && hour_of_day < DAY_END) { - return true; + public static final long convertKbToFakeKb64bit(Environment env, long Kb) { + long memReservedSystem = + env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); + return (long) (Math.ceil((Kb * 0.0009765625) * 0.0009765625) * 1048576) - memReservedSystem; + } + + public static final long convertKbToFakeKb32bit(Environment env, long Kb) { + long memReservedSystem = + env.getRequiredProperty("dispatcher.memory.mem_reserved_system", Long.class); + return (long) (Math.floor((Kb * 0.0009765625) * 0.0009765625) * 1048576) + - memReservedSystem; } - return false; - } - - /** - * Take a frame range and chunk size and return an ordered array of frames with all duplicates - * removed. - * - * @param range - * @param chunkSize - * @return - */ - public static List normalizeFrameRange(String range, int chunkSize) { - return normalizeFrameRange(new FrameSet(range), chunkSize); - } - - /** - * Take a frame range and chunk size and return an ordered array of frames with all duplicates - * removed. - * - * @param frameSet - * @param chunkSize - * @return - */ - public static List normalizeFrameRange(FrameSet frameSet, int chunkSize) { - - int rangeSize = frameSet.size(); - Set result = new LinkedHashSet(rangeSize / chunkSize); /** - * Have to remove all duplicates and maintain order before chunking it. + * returns epoch time + * + * @return int */ - if (chunkSize > 1) { + public static int getTime() { + return (int) (System.currentTimeMillis() / 1000); + } - /** - * This handles people who chunk on 1,000,000. - */ - if (chunkSize > rangeSize) { - result.add(frameSet.get(0)); - } else { + /** + * returns a frame name from a layer and frame number. 
+ * + * @param layer + * @param num + * @return String + */ + public final static String buildFrameName(LayerInterface layer, int num) { + return String.format("%04d-%s", num, layer.getName()); + } - /** - * A linked hash set to weed out duplicates but maintain frame ordering. - */ - final Set tempResult = new LinkedHashSet((rangeSize / chunkSize) + 1); + public final static String buildProcName(String host, int cores, int gpus) { + return String.format(Locale.ROOT, "%s/%4.2f/%d", host, Convert.coreUnitsToCores(cores), + gpus); + } - for (int idx = 0; idx < rangeSize; idx = idx + 1) { - tempResult.add(frameSet.get(idx)); + /** + * for logging how long an operation took + * + * @param time + * @param message + */ + public final static void logDuration(long time, String message) { + long duration = System.currentTimeMillis() - time; + logger.info("Operation: " + message + " took " + duration + "ms"); + } + + /** + * return the milliseconds since time + * + * @param time + */ + public final static long duration(long time) { + return System.currentTimeMillis() - time; + } + + public static final long getCpuUsage() { + ThreadMXBean mx = ManagementFactory.getThreadMXBean(); + mx.setThreadCpuTimeEnabled(true); + long result = 0; + for (long id : mx.getAllThreadIds()) { + result = result + mx.getThreadUserTime(id); } + return result; + } + + private static final int DAY_START = 7; + private static final int DAY_END = 19; + + public static boolean isDayTime() { + Calendar cal = Calendar.getInstance(); + int hour_of_day = cal.get(Calendar.HOUR_OF_DAY); + if (hour_of_day >= DAY_START && hour_of_day < DAY_END) { + return true; + } + return false; + } + + /** + * Take a frame range and chunk size and return an ordered array of frames with all duplicates + * removed. + * + * @param range + * @param chunkSize + * @return + */ + public static List normalizeFrameRange(String range, int chunkSize) { + return normalizeFrameRange(new FrameSet(range), chunkSize); + } + + /** + * Take a frame range and chunk size and return an ordered array of frames with all duplicates + * removed. + * + * @param frameSet + * @param chunkSize + * @return + */ + public static List normalizeFrameRange(FrameSet frameSet, int chunkSize) { + + int rangeSize = frameSet.size(); + Set result = new LinkedHashSet(rangeSize / chunkSize); /** - * Now go through the frames and add 1 frame for every chunk. + * Have to remove all duplicates and maintain order before chunking it. */ - int idx = 0; - for (int frame : tempResult) { - if (idx % chunkSize == 0) { - result.add(frame); - } - idx = idx + 1; + if (chunkSize > 1) { + + /** + * This handles people who chunk on 1,000,000. + */ + if (chunkSize > rangeSize) { + result.add(frameSet.get(0)); + } else { + + /** + * A linked hash set to weed out duplicates but maintain frame ordering. + */ + final Set tempResult = + new LinkedHashSet((rangeSize / chunkSize) + 1); + + for (int idx = 0; idx < rangeSize; idx = idx + 1) { + tempResult.add(frameSet.get(idx)); + } + + /** + * Now go through the frames and add 1 frame for every chunk. 
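// Usage sketch (illustrative only, not part of this patch): frame and proc name formats.
// The layer argument stands for any LayerInterface whose getName() returns "render_layer".
static void namingSketch(com.imageworks.spcue.LayerInterface layer) {
    String frame = CueUtil.buildFrameName(layer, 7);        // "0007-render_layer"
    String proc = CueUtil.buildProcName("host01", 250, 1);  // "host01/2.50/1"
}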
+ */ + int idx = 0; + for (int frame : tempResult) { + if (idx % chunkSize == 0) { + result.add(frame); + } + idx = idx + 1; + } + } + } else { + for (int idx = 0; idx < rangeSize; idx = idx + 1) { + result.add(frameSet.get(idx)); + } } - } - } else { - for (int idx = 0; idx < rangeSize; idx = idx + 1) { - result.add(frameSet.get(idx)); - } + + return Collections.unmodifiableList(new ArrayList(result)); } - return Collections.unmodifiableList(new ArrayList(result)); - } - - /** - * Get "{prefix}.{key}" property int value - * - * @param env - * @param prefix Example "dispatcher.report_queue" - * @param key Example "core_pool_size" - */ - public static int getIntProperty(Environment env, String prefix, String key) - throws IllegalStateException { - Integer value = env.getRequiredProperty(prefix + "." + key, Integer.class); - return value.intValue(); - } + /** + * Get "{prefix}.{key}" property int value + * + * @param env + * @param prefix Example "dispatcher.report_queue" + * @param key Example "core_pool_size" + */ + public static int getIntProperty(Environment env, String prefix, String key) + throws IllegalStateException { + Integer value = env.getRequiredProperty(prefix + "." + key, Integer.class); + return value.intValue(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java b/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java index e00485a57..0698ede91 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java @@ -20,170 +20,170 @@ */ public class FrameRange { - private static final Pattern SINGLE_FRAME_PATTERN = Pattern.compile("(-?)\\d+"); - private static final Pattern SIMPLE_FRAME_RANGE_PATTERN = - Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+)"); - private static final Pattern STEP_PATTERN = - Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+)(?[xy])(?(-?)\\d+)"); - private static final Pattern INTERLEAVE_PATTERN = - Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+):(?(-?)\\d+)"); - - private ImmutableList frameList; - - /** - * Construct a FrameRange object by parsing a spec. - * - * FrameSet("1-10x3"); FrameSet("1-10y3"); // inverted step FrameSet("10-1x-1"); FrameSet("1"); // - * same as "1-1x1" FrameSet("1-10:5"); // interleave of 5 - * - * A valid spec consists of: - * - * An inTime. An optional hyphen and outTime. An optional x or y and stepSize. Or an optional : - * and interleaveSize. If outTime is less than inTime, stepSize must be negative. - * - * A stepSize of 0 produces an empty FrameRange. - * - * A stepSize cannot be combined with a interleaveSize. - * - * A stepSize designated with y creates an inverted step. Frames that would be included with an x - * step are excluded. - * - * Example: 1-10y3 == 2, 3, 5, 6, 8, 9. - * - * An interleaveSize alters the order of frames when iterating over the FrameRange. The iterator - * will first produce the list of frames from inTime to outTime with a stepSize equal to - * interleaveSize. The interleaveSize is then divided in half, producing another set of frames - * unique from the first set. This process is repeated until interleaveSize reaches 1. - * - * Example: 1-10:5 == 1, 6, 3, 5 ,7 ,9, 2, 4, 8, 10. - */ - public FrameRange(String frameRange) { - frameList = parseFrameRange(frameRange); - } - - /** - * Gets the number of frames contained in this sequence. - * - * @return - */ - public int size() { - return frameList.size(); - } - - /** - * Gets an individual entry in the sequence, by numerical position. 
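// Usage sketch (illustrative only, not part of this patch): normalizeFrameRange keeps one
// frame per chunk, preserves order, and drops duplicate frames.
static void normalizeSketch() {
    CueUtil.normalizeFrameRange("1-10", 5);   // [1, 6]
    CueUtil.normalizeFrameRange("1-10", 100); // [1]  (chunk size larger than the range)
    CueUtil.normalizeFrameRange("1-3,3", 1);  // [1, 2, 3]  (duplicate 3 removed)
}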
- * - * @param idx - * @return - */ - public int get(int idx) { - return frameList.get(idx); - } - - /** - * Query index of frame number in frame set. - * - * @param idx - * @return Index of frame. -1 if frame set does not contain frame. - */ - public int index(int idx) { - return frameList.indexOf(idx); - } - - /** - * Gets the full numerical sequence. - * - * @return - */ - public ImmutableList getAll() { - return frameList; - } - - protected static ImmutableList parseFrameRange(String frameRange) { - Matcher singleFrameMatcher = SINGLE_FRAME_PATTERN.matcher(frameRange); - if (singleFrameMatcher.matches()) { - return ImmutableList.of(Integer.valueOf(frameRange)); + private static final Pattern SINGLE_FRAME_PATTERN = Pattern.compile("(-?)\\d+"); + private static final Pattern SIMPLE_FRAME_RANGE_PATTERN = + Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+)"); + private static final Pattern STEP_PATTERN = + Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+)(?[xy])(?(-?)\\d+)"); + private static final Pattern INTERLEAVE_PATTERN = + Pattern.compile("(?(-?)\\d+)-(?(-?)\\d+):(?(-?)\\d+)"); + + private ImmutableList frameList; + + /** + * Construct a FrameRange object by parsing a spec. + * + * FrameSet("1-10x3"); FrameSet("1-10y3"); // inverted step FrameSet("10-1x-1"); FrameSet("1"); + * // same as "1-1x1" FrameSet("1-10:5"); // interleave of 5 + * + * A valid spec consists of: + * + * An inTime. An optional hyphen and outTime. An optional x or y and stepSize. Or an optional : + * and interleaveSize. If outTime is less than inTime, stepSize must be negative. + * + * A stepSize of 0 produces an empty FrameRange. + * + * A stepSize cannot be combined with a interleaveSize. + * + * A stepSize designated with y creates an inverted step. Frames that would be included with an + * x step are excluded. + * + * Example: 1-10y3 == 2, 3, 5, 6, 8, 9. + * + * An interleaveSize alters the order of frames when iterating over the FrameRange. The iterator + * will first produce the list of frames from inTime to outTime with a stepSize equal to + * interleaveSize. The interleaveSize is then divided in half, producing another set of frames + * unique from the first set. This process is repeated until interleaveSize reaches 1. + * + * Example: 1-10:5 == 1, 6, 3, 5 ,7 ,9, 2, 4, 8, 10. + */ + public FrameRange(String frameRange) { + frameList = parseFrameRange(frameRange); } - Matcher simpleRangeMatcher = SIMPLE_FRAME_RANGE_PATTERN.matcher(frameRange); - if (simpleRangeMatcher.matches()) { - Integer startFrame = Integer.valueOf(simpleRangeMatcher.group("sf")); - Integer endFrame = Integer.valueOf(simpleRangeMatcher.group("ef")); - return getIntRange(startFrame, endFrame, (endFrame >= startFrame ? 1 : -1)); + /** + * Gets the number of frames contained in this sequence. + * + * @return + */ + public int size() { + return frameList.size(); } - Matcher rangeWithStepMatcher = STEP_PATTERN.matcher(frameRange); - if (rangeWithStepMatcher.matches()) { - Integer startFrame = Integer.valueOf(rangeWithStepMatcher.group("sf")); - Integer endFrame = Integer.valueOf(rangeWithStepMatcher.group("ef")); - Integer step = Integer.valueOf(rangeWithStepMatcher.group("step")); - String stepSep = rangeWithStepMatcher.group("stepSep"); - return getSteppedRange(startFrame, endFrame, step, "y".equals(stepSep)); + /** + * Gets an individual entry in the sequence, by numerical position. 
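// Usage sketch (illustrative only, not part of this patch): results of parsing the specs
// documented in the constructor comment above.
static void frameRangeSketch() {
    new FrameRange("1-10x3").getAll();  // [1, 4, 7, 10]
    new FrameRange("1-10y3").getAll();  // [2, 3, 5, 6, 8, 9]   (inverted step)
    new FrameRange("1-10:5").getAll();  // [1, 6, 3, 5, 7, 9, 2, 4, 8, 10]  (interleave)
    new FrameRange("10-1x-2").getAll(); // [10, 8, 6, 4, 2]     (negative step)
}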
+ * + * @param idx + * @return + */ + public int get(int idx) { + return frameList.get(idx); } - Matcher rangeWithInterleaveMatcher = INTERLEAVE_PATTERN.matcher(frameRange); - if (rangeWithInterleaveMatcher.matches()) { - Integer startFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("sf")); - Integer endFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("ef")); - Integer step = Integer.valueOf(rangeWithInterleaveMatcher.group("step")); - return getInterleavedRange(startFrame, endFrame, step); + /** + * Query index of frame number in frame set. + * + * @param idx + * @return Index of frame. -1 if frame set does not contain frame. + */ + public int index(int idx) { + return frameList.indexOf(idx); } - throw new IllegalArgumentException("unrecognized frame range syntax " + frameRange); - } + /** + * Gets the full numerical sequence. + * + * @return + */ + public ImmutableList getAll() { + return frameList; + } + + protected static ImmutableList parseFrameRange(String frameRange) { + Matcher singleFrameMatcher = SINGLE_FRAME_PATTERN.matcher(frameRange); + if (singleFrameMatcher.matches()) { + return ImmutableList.of(Integer.valueOf(frameRange)); + } + + Matcher simpleRangeMatcher = SIMPLE_FRAME_RANGE_PATTERN.matcher(frameRange); + if (simpleRangeMatcher.matches()) { + Integer startFrame = Integer.valueOf(simpleRangeMatcher.group("sf")); + Integer endFrame = Integer.valueOf(simpleRangeMatcher.group("ef")); + return getIntRange(startFrame, endFrame, (endFrame >= startFrame ? 1 : -1)); + } + + Matcher rangeWithStepMatcher = STEP_PATTERN.matcher(frameRange); + if (rangeWithStepMatcher.matches()) { + Integer startFrame = Integer.valueOf(rangeWithStepMatcher.group("sf")); + Integer endFrame = Integer.valueOf(rangeWithStepMatcher.group("ef")); + Integer step = Integer.valueOf(rangeWithStepMatcher.group("step")); + String stepSep = rangeWithStepMatcher.group("stepSep"); + return getSteppedRange(startFrame, endFrame, step, "y".equals(stepSep)); + } + + Matcher rangeWithInterleaveMatcher = INTERLEAVE_PATTERN.matcher(frameRange); + if (rangeWithInterleaveMatcher.matches()) { + Integer startFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("sf")); + Integer endFrame = Integer.valueOf(rangeWithInterleaveMatcher.group("ef")); + Integer step = Integer.valueOf(rangeWithInterleaveMatcher.group("step")); + return getInterleavedRange(startFrame, endFrame, step); + } + + throw new IllegalArgumentException("unrecognized frame range syntax " + frameRange); + } - private static ImmutableList getIntRange(Integer start, Integer end, Integer step) { - int streamStart = (step < 0 ? end : start); - int streamEnd = (step < 0 ? start : end); - int streamStep = abs(step); + private static ImmutableList getIntRange(Integer start, Integer end, Integer step) { + int streamStart = (step < 0 ? end : start); + int streamEnd = (step < 0 ? 
start : end); + int streamStep = abs(step); - List intList = IntStream.rangeClosed(streamStart, streamEnd) - .filter(n -> (n - start) % streamStep == 0).boxed().collect(Collectors.toList()); + List intList = IntStream.rangeClosed(streamStart, streamEnd) + .filter(n -> (n - start) % streamStep == 0).boxed().collect(Collectors.toList()); - if (step < 0) { - return ImmutableList.copyOf(Lists.reverse(intList)); + if (step < 0) { + return ImmutableList.copyOf(Lists.reverse(intList)); + } + return ImmutableList.copyOf(intList); } - return ImmutableList.copyOf(intList); - } - - private static ImmutableList getSteppedRange(Integer start, Integer end, Integer step, - Boolean inverseStep) { - validateStepSign(start, end, step); - ImmutableList steppedRange = getIntRange(start, end, step); - if (inverseStep) { - ImmutableList fullRange = getIntRange(start, end, (step < 0 ? -1 : 1)); - return ImmutableList - .copyOf(Collections2.filter(fullRange, Predicates.not(Predicates.in(steppedRange)))); + + private static ImmutableList getSteppedRange(Integer start, Integer end, Integer step, + Boolean inverseStep) { + validateStepSign(start, end, step); + ImmutableList steppedRange = getIntRange(start, end, step); + if (inverseStep) { + ImmutableList fullRange = getIntRange(start, end, (step < 0 ? -1 : 1)); + return ImmutableList.copyOf( + Collections2.filter(fullRange, Predicates.not(Predicates.in(steppedRange)))); + } + return steppedRange; } - return steppedRange; - } - private static ImmutableList getInterleavedRange(Integer start, Integer end, - Integer step) { - validateStepSign(start, end, step); - Set interleavedFrames = new LinkedHashSet<>(); + private static ImmutableList getInterleavedRange(Integer start, Integer end, + Integer step) { + validateStepSign(start, end, step); + Set interleavedFrames = new LinkedHashSet<>(); - while (abs(step) > 0) { - interleavedFrames.addAll(getIntRange(start, end, step)); - step /= 2; + while (abs(step) > 0) { + interleavedFrames.addAll(getIntRange(start, end, step)); + step /= 2; + } + return ImmutableList.copyOf(interleavedFrames); } - return ImmutableList.copyOf(interleavedFrames); - } - - private static void validateStepSign(Integer start, Integer end, Integer step) { - if (step > 1) { - if (end < start) { - throw new IllegalArgumentException( - "end frame may not be less than start frame when using a positive step"); - } - } else if (step == 0) { - throw new IllegalArgumentException("step cannot be zero"); - - } else if (step < 0) { - if (end >= start) { - throw new IllegalArgumentException( - "end frame may not be greater than start frame when using a negative step"); - } + + private static void validateStepSign(Integer start, Integer end, Integer step) { + if (step > 1) { + if (end < start) { + throw new IllegalArgumentException( + "end frame may not be less than start frame when using a positive step"); + } + } else if (step == 0) { + throw new IllegalArgumentException("step cannot be zero"); + + } else if (step < 0) { + if (end >= start) { + throw new IllegalArgumentException( + "end frame may not be greater than start frame when using a negative step"); + } + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java b/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java index ef6381e48..e2ea57f0a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/FrameSet.java @@ -10,164 +10,165 @@ * Represents an ordered sequence of FrameRanges. 
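// Usage sketch (illustrative only, not part of this patch): validateStepSign rejects
// contradictory specs, so each constructor call below throws IllegalArgumentException.
static void invalidSpecSketch() {
    new FrameRange("10-1x2");  // positive step with a descending range
    new FrameRange("1-10x-2"); // negative step with an ascending range
    new FrameRange("1-10x0");  // zero step
}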
*/ public class FrameSet { - private ImmutableList frameList; - - /** - * Construct a FrameSet object by parsing a spec. - * - * See FrameRange for the supported syntax. A FrameSet follows the same syntax, with the addition - * that it may be a comma-separated list of different FrameRanges. - */ - public FrameSet(String frameRange) { - frameList = parseFrameRange(frameRange); - } - - /** - * Gets the number of frames contained in this sequence. - * - * @return - */ - public int size() { - return frameList.size(); - } - - /** - * Gets an individual entry in the sequence, by numerical position. - * - * @param idx - * @return - */ - public int get(int idx) { - return frameList.get(idx); - } - - /** - * Query index of frame number in frame set. - * - * @param idx - * @return Index of frame. -1 if frame set does not contain frame. - */ - public int index(int idx) { - return frameList.indexOf(idx); - } - - /** - * Gets the full numerical sequence. - * - * @return - */ - public ImmutableList getAll() { - return frameList; - } - - private ImmutableList parseFrameRange(String frameRange) { - ImmutableList.Builder builder = ImmutableList.builder(); - for (String frameRangeSection : frameRange.split(",")) { - builder.addAll(FrameRange.parseFrameRange(frameRangeSection)); + private ImmutableList frameList; + + /** + * Construct a FrameSet object by parsing a spec. + * + * See FrameRange for the supported syntax. A FrameSet follows the same syntax, with the + * addition that it may be a comma-separated list of different FrameRanges. + */ + public FrameSet(String frameRange) { + frameList = parseFrameRange(frameRange); } - return builder.build(); - } - - /** - * Return a sub-FrameSet object starting at startFrame with max chunkSize members - * - * @param startFrameIndex Index of frame to start at; not the frame itself - * @param chunkSize Max number of frames per chunk - * @return String representation of the chunk, e.g. 1-1001x3 - */ - public String getChunk(int startFrameIndex, int chunkSize) { - if (frameList.size() <= startFrameIndex || startFrameIndex < 0) { - String sf = String.valueOf(startFrameIndex); - String sz = String.valueOf(frameList.size() - 1); - throw new IllegalArgumentException("startFrameIndex " + sf + " is not in range 0-" + sz); + + /** + * Gets the number of frames contained in this sequence. + * + * @return + */ + public int size() { + return frameList.size(); } - if (chunkSize == 1) { - // Chunksize of 1 so the FrameSet is just the startFrame - return String.valueOf(frameList.get(startFrameIndex)); + + /** + * Gets an individual entry in the sequence, by numerical position. + * + * @param idx + * @return + */ + public int get(int idx) { + return frameList.get(idx); } - int finalFrameIndex = frameList.size() - 1; - int endFrameIndex = startFrameIndex + chunkSize - 1; - if (endFrameIndex > finalFrameIndex) { - // We don't have enough frames, so return the remaining frames. - endFrameIndex = finalFrameIndex; + + /** + * Query index of frame number in frame set. + * + * @param idx + * @return Index of frame. -1 if frame set does not contain frame. + */ + public int index(int idx) { + return frameList.indexOf(idx); } - return framesToFrameRanges(frameList.subList(startFrameIndex, endFrameIndex + 1)); - } - - /** - * Return a string representation of a subset of a frame range. 
- * - * This approach was adapted from https://pypi.org/project/Fileseq/ - * - * @param startFrame Start frame - * @param endFrame End frame - * @param step The step between frames - * @return String representation of the frame range, e.g. 1-1001x3 - */ - private String buildFrangePart(int startFrame, int endFrame, int step) { - if (startFrame == endFrame) { - return String.valueOf(startFrame); - } else if (step == 1) { - return String.format("%d-%d", startFrame, endFrame); - } else { - return String.format("%d-%dx%d", startFrame, endFrame, step); + /** + * Gets the full numerical sequence. + * + * @return + */ + public ImmutableList getAll() { + return frameList; } - } - - /** - * Return a String representation of a frame range based on a list of literal integer frame IDs. - * - * @param frames List of integers representing frame IDs, - * @return String representation of a frameset, e.g. '1-10,12-100x2' - */ - private String framesToFrameRanges(ImmutableList frames) { - int l = frames.size(); - if (l == 0) { - return ""; - } else if (l == 1) { - return String.valueOf(frames.get(0)); + + private ImmutableList parseFrameRange(String frameRange) { + ImmutableList.Builder builder = ImmutableList.builder(); + for (String frameRangeSection : frameRange.split(",")) { + builder.addAll(FrameRange.parseFrameRange(frameRangeSection)); + } + return builder.build(); } - StringJoiner resultBuilder = new StringJoiner(","); - - int curr_count = 1; - int curr_step = 0; - int new_step = 0; - int curr_start = frames.get(0); - int curr_frame = frames.get(0); - int last_frame = frames.get(0); - - for (int i = 1; i < frames.size(); i++) { - curr_frame = frames.get(i); - - if (curr_step == 0) { - curr_step = curr_frame - curr_start; - } - new_step = curr_frame - last_frame; - if (curr_step == new_step) { - last_frame = curr_frame; - curr_count += 1; - } else if (curr_count == 2 && curr_step != 1) { - resultBuilder.add(String.valueOf(curr_start)); - curr_step = 0; - curr_start = last_frame; - last_frame = curr_frame; - } else { - resultBuilder.add(buildFrangePart(curr_start, last_frame, curr_step)); - curr_step = 0; - curr_start = curr_frame; - last_frame = curr_frame; - curr_count = 1; - } + /** + * Return a sub-FrameSet object starting at startFrame with max chunkSize members + * + * @param startFrameIndex Index of frame to start at; not the frame itself + * @param chunkSize Max number of frames per chunk + * @return String representation of the chunk, e.g. 1-1001x3 + */ + public String getChunk(int startFrameIndex, int chunkSize) { + if (frameList.size() <= startFrameIndex || startFrameIndex < 0) { + String sf = String.valueOf(startFrameIndex); + String sz = String.valueOf(frameList.size() - 1); + throw new IllegalArgumentException( + "startFrameIndex " + sf + " is not in range 0-" + sz); + } + if (chunkSize == 1) { + // Chunksize of 1 so the FrameSet is just the startFrame + return String.valueOf(frameList.get(startFrameIndex)); + } + int finalFrameIndex = frameList.size() - 1; + int endFrameIndex = startFrameIndex + chunkSize - 1; + if (endFrameIndex > finalFrameIndex) { + // We don't have enough frames, so return the remaining frames. 
+ endFrameIndex = finalFrameIndex; + } + + return framesToFrameRanges(frameList.subList(startFrameIndex, endFrameIndex + 1)); } - if (curr_count == 2 && curr_step != 1) { - resultBuilder.add(String.valueOf(curr_start)); - resultBuilder.add(String.valueOf(curr_frame)); - } else { - resultBuilder.add(buildFrangePart(curr_start, curr_frame, curr_step)); + + /** + * Return a string representation of a subset of a frame range. + * + * This approach was adapted from https://pypi.org/project/Fileseq/ + * + * @param startFrame Start frame + * @param endFrame End frame + * @param step The step between frames + * @return String representation of the frame range, e.g. 1-1001x3 + */ + private String buildFrangePart(int startFrame, int endFrame, int step) { + if (startFrame == endFrame) { + return String.valueOf(startFrame); + } else if (step == 1) { + return String.format("%d-%d", startFrame, endFrame); + } else { + return String.format("%d-%dx%d", startFrame, endFrame, step); + } } - return resultBuilder.toString(); - } + /** + * Return a String representation of a frame range based on a list of literal integer frame IDs. + * + * @param frames List of integers representing frame IDs, + * @return String representation of a frameset, e.g. '1-10,12-100x2' + */ + private String framesToFrameRanges(ImmutableList frames) { + int l = frames.size(); + if (l == 0) { + return ""; + } else if (l == 1) { + return String.valueOf(frames.get(0)); + } + + StringJoiner resultBuilder = new StringJoiner(","); + + int curr_count = 1; + int curr_step = 0; + int new_step = 0; + int curr_start = frames.get(0); + int curr_frame = frames.get(0); + int last_frame = frames.get(0); + + for (int i = 1; i < frames.size(); i++) { + curr_frame = frames.get(i); + + if (curr_step == 0) { + curr_step = curr_frame - curr_start; + } + new_step = curr_frame - last_frame; + if (curr_step == new_step) { + last_frame = curr_frame; + curr_count += 1; + } else if (curr_count == 2 && curr_step != 1) { + resultBuilder.add(String.valueOf(curr_start)); + curr_step = 0; + curr_start = last_frame; + last_frame = curr_frame; + } else { + resultBuilder.add(buildFrangePart(curr_start, last_frame, curr_step)); + curr_step = 0; + curr_start = curr_frame; + last_frame = curr_frame; + curr_count = 1; + } + } + if (curr_count == 2 && curr_step != 1) { + resultBuilder.add(String.valueOf(curr_start)); + resultBuilder.add(String.valueOf(curr_frame)); + } else { + resultBuilder.add(buildFrangePart(curr_start, curr_frame, curr_step)); + } + + return resultBuilder.toString(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java index 625732b8d..7ca22f518 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java @@ -25,42 +25,43 @@ @Component public class JobLogUtil { - @Autowired - private Environment env; + @Autowired + private Environment env; - public boolean createJobLogDirectory(String path) { - File f = new File(path); - f.mkdir(); - f.setWritable(true, false); - return f.isDirectory(); - } + public boolean createJobLogDirectory(String path) { + File f = new File(path); + f.mkdir(); + f.setWritable(true, false); + return f.isDirectory(); + } - public String getJobLogDir(String show, String shot, String os) { - StringBuilder sb = new StringBuilder(512); - sb.append(getJobLogRootDir(os)); - sb.append("/"); - sb.append(show); - sb.append("/"); - sb.append(shot); - 
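// Usage sketch (illustrative only, not part of this patch): getChunk returns up to
// chunkSize frames starting at a frame index, re-encoded as a compact range string.
static void frameSetChunkSketch() {
    FrameSet frames = new FrameSet("1-10x2,20");  // 1, 3, 5, 7, 9, 20
    frames.getChunk(0, 6);  // "1-9x2,20"
    frames.getChunk(4, 4);  // "9,20"  (only two frames remain)
    frames.getChunk(2, 1);  // "5"     (chunk size 1 yields a single frame)
}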
sb.append("/logs"); - return sb.toString(); - } + public String getJobLogDir(String show, String shot, String os) { + StringBuilder sb = new StringBuilder(512); + sb.append(getJobLogRootDir(os)); + sb.append("/"); + sb.append(show); + sb.append("/"); + sb.append(shot); + sb.append("/logs"); + return sb.toString(); + } - public String getJobLogPath(JobDetail job) { - StringBuilder sb = new StringBuilder(512); - sb.append(getJobLogDir(job.showName, job.shot, job.os)); - sb.append("/"); - sb.append(job.name); - sb.append("--"); - sb.append(job.id); - return sb.toString(); - } + public String getJobLogPath(JobDetail job) { + StringBuilder sb = new StringBuilder(512); + sb.append(getJobLogDir(job.showName, job.shot, job.os)); + sb.append("/"); + sb.append(job.name); + sb.append("--"); + sb.append(job.id); + return sb.toString(); + } - public String getJobLogRootDir(String os) { - try { - return env.getRequiredProperty(String.format("log.frame-log-root.%s", os), String.class); - } catch (IllegalStateException e) { - return env.getRequiredProperty("log.frame-log-root.default_os", String.class); + public String getJobLogRootDir(String os) { + try { + return env.getRequiredProperty(String.format("log.frame-log-root.%s", os), + String.class); + } catch (IllegalStateException e) { + return env.getRequiredProperty("log.frame-log-root.default_os", String.class); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java index a5036b567..7f4bca324 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java @@ -23,120 +23,120 @@ public class SqlUtil { - public static String buildBindVariableArray(String col, Integer numValues) { - StringBuilder sb = new StringBuilder(1024); - sb.append(col); - sb.append(" IN ("); - for (int i = 0; i < numValues; i++) { - sb.append("?,"); + public static String buildBindVariableArray(String col, Integer numValues) { + StringBuilder sb = new StringBuilder(1024); + sb.append(col); + sb.append(" IN ("); + for (int i = 0; i < numValues; i++) { + sb.append("?,"); + } + sb.delete(sb.length() - 1, sb.length()); + sb.append(")"); + return sb.toString(); } - sb.delete(sb.length() - 1, sb.length()); - sb.append(")"); - return sb.toString(); - } - /** - * returns a 32 character UUID string that will be identical everytime its generated based on the - * name passed in. - * - * @param name String - * @return String - */ - public static String genShortKeyByName(String name) { - return UUID.nameUUIDFromBytes(name.getBytes()).toString().replaceAll("-", ""); - } + /** + * returns a 32 character UUID string that will be identical everytime its generated based on + * the name passed in. + * + * @param name String + * @return String + */ + public static String genShortKeyByName(String name) { + return UUID.nameUUIDFromBytes(name.getBytes()).toString().replaceAll("-", ""); + } - /** - * returns a 32 character UUID string that will be identical everytime its generated based on the - * name passed in. - * - * @param name String - * @return String - */ - public static String genShortKeyByNameAndTime(String name) { - StringBuilder sb = new StringBuilder(64); - sb.append(name); - sb.append(System.currentTimeMillis()); - return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString().replaceAll("-", ""); - } + /** + * returns a 32 character UUID string that will be identical everytime its generated based on + * the name passed in. 
+ * + * @param name String + * @return String + */ + public static String genShortKeyByNameAndTime(String name) { + StringBuilder sb = new StringBuilder(64); + sb.append(name); + sb.append(System.currentTimeMillis()); + return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString().replaceAll("-", ""); + } - /** - * returns a random UUID - * - * @return String - */ - public static String genKeyRandom() { - return UUID.randomUUID().toString(); - } + /** + * returns a random UUID + * + * @return String + */ + public static String genKeyRandom() { + return UUID.randomUUID().toString(); + } - /** - * returns a 36 character UUID string that will be identical everytime its generated based on the - * name passed in. - * - * @param name String - * @return String - */ - public static String genKeyByName(String name) { - return UUID.nameUUIDFromBytes(name.getBytes()).toString(); - } + /** + * returns a 36 character UUID string that will be identical everytime its generated based on + * the name passed in. + * + * @param name String + * @return String + */ + public static String genKeyByName(String name) { + return UUID.nameUUIDFromBytes(name.getBytes()).toString(); + } - /** - * returns a 36 character UUID string that is based on the name and the time the UUID is created - * - * @param name String - * @return String - */ - public static String genKeyByNameAndTime(String name) { - StringBuilder sb = new StringBuilder(64); - sb.append(name); - sb.append(System.currentTimeMillis()); - sb.append(System.getenv("HOSTNAME")); - return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString(); - } + /** + * returns a 36 character UUID string that is based on the name and the time the UUID is created + * + * @param name String + * @return String + */ + public static String genKeyByNameAndTime(String name) { + StringBuilder sb = new StringBuilder(64); + sb.append(name); + sb.append(System.currentTimeMillis()); + sb.append(System.getenv("HOSTNAME")); + return UUID.nameUUIDFromBytes(sb.toString().getBytes()).toString(); + } - /** - * returns a 36 character UUID string that is based on time and the IP address of the primary - * network interface and the time - * - * @return String - */ - public static String genKeyByTime() { - String name = System.getenv("HOSTNAME") + System.currentTimeMillis(); - return UUID.nameUUIDFromBytes(name.getBytes()).toString(); - } + /** + * returns a 36 character UUID string that is based on time and the IP address of the primary + * network interface and the time + * + * @return String + */ + public static String genKeyByTime() { + String name = System.getenv("HOSTNAME") + System.currentTimeMillis(); + return UUID.nameUUIDFromBytes(name.getBytes()).toString(); + } - /** - * SQL ResultSet.getString method returns a null, but we want to initialize our message builders - * with an empty string. Call rs.getString but return an empty string instead of null.] - * - * @param rs ResultSet - * @param field String - * @return String - */ - public static String getString(ResultSet rs, String field) throws SQLException { - String value = rs.getString(field); - if (rs.wasNull()) { - return ""; - } else { - return value; + /** + * SQL ResultSet.getString method returns a null, but we want to initialize our message builders + * with an empty string. Call rs.getString but return an empty string instead of null.] 
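// Usage sketch (illustrative only, not part of this patch): the ResultSet wrappers return
// empty values instead of null so message builders can consume them directly; the column
// names below are placeholders.
static void resultSetSketch(java.sql.ResultSet rs) throws java.sql.SQLException {
    String name = SqlUtil.getString(rs, "str_name");  // "" when the column is NULL
    com.google.protobuf.ByteString data = SqlUtil.getByteString(rs, "b_data");
    // data is an empty ByteString when the column is NULL
}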
+ * + * @param rs ResultSet + * @param field String + * @return String + */ + public static String getString(ResultSet rs, String field) throws SQLException { + String value = rs.getString(field); + if (rs.wasNull()) { + return ""; + } else { + return value; + } } - } - public static String getString(ResultSet rs, int index) throws SQLException { - String value = rs.getString(index); - if (rs.wasNull()) { - return ""; - } else { - return value; + public static String getString(ResultSet rs, int index) throws SQLException { + String value = rs.getString(index); + if (rs.wasNull()) { + return ""; + } else { + return value; + } } - } - public static ByteString getByteString(ResultSet rs, String field) throws SQLException { - byte[] data = rs.getBytes(field); - if (rs.wasNull()) { - return ByteString.copyFrom("".getBytes()); - } else { - return ByteString.copyFrom(data); + public static ByteString getByteString(ResultSet rs, String field) throws SQLException { + byte[] data = rs.getBytes(field); + if (rs.wasNull()) { + return ByteString.copyFrom("".getBytes()); + } else { + return ByteString.copyFrom(data); + } } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java index 606f64c61..a17d9a9f2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/TagUtil.java @@ -22,25 +22,25 @@ public class TagUtil { - /** - * This will take the RQD tags and convert them into something usable for now until the RQD tag - * standard is set. - * - * @param host - * @return - */ - public static List buildHardwareTags(RenderHost host) { - List tags = new ArrayList(); - if (host.getTagsList().contains("linux")) { - tags.add("linux"); - } + /** + * This will take the RQD tags and convert them into something usable for now until the RQD tag + * standard is set. 
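// Usage sketch (illustrative only, not part of this patch): buildHardwareTags (below)
// passes a "linux" tag through and treats "64bit" hosts as both 64bit- and 32bit-capable.
static void hardwareTagSketch(RenderHost host) {
    // A host reporting ["linux", "64bit"] yields ["linux", "64bit", "32bit"];
    // a host without "64bit" yields only ["32bit"] (plus "linux" when reported).
    java.util.List<String> tags = TagUtil.buildHardwareTags(host);
}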
+ * + * @param host + * @return + */ + public static List buildHardwareTags(RenderHost host) { + List tags = new ArrayList(); + if (host.getTagsList().contains("linux")) { + tags.add("linux"); + } - if (host.getTagsList().contains("64bit")) { - tags.add("64bit"); - tags.add("32bit"); - } else { - tags.add("32bit"); + if (host.getTagsList().contains("64bit")) { + tags.add("64bit"); + tags.add("32bit"); + } else { + tags.add("32bit"); + } + return tags; } - return tags; - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java b/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java index 50873050a..c68b525c6 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java +++ b/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java @@ -28,20 +28,20 @@ @Configuration @ImportResource({"classpath:conf/spring/applicationContext-assumptions.xml", - "classpath:conf/spring/applicationContext-dbEngine.xml", - "classpath:conf/spring/applicationContext-grpc.xml", - "classpath:conf/spring/applicationContext-grpcServer.xml", - "classpath:conf/spring/applicationContext-service.xml", - "classpath:conf/spring/applicationContext-jms.xml", - "classpath:conf/spring/applicationContext-criteria.xml"}) + "classpath:conf/spring/applicationContext-dbEngine.xml", + "classpath:conf/spring/applicationContext-grpc.xml", + "classpath:conf/spring/applicationContext-grpcServer.xml", + "classpath:conf/spring/applicationContext-service.xml", + "classpath:conf/spring/applicationContext-jms.xml", + "classpath:conf/spring/applicationContext-criteria.xml"}) @EnableConfigurationProperties @PropertySource({"classpath:opencue.properties"}) public class TestAppConfig { - @Configuration - @Conditional(PostgresDatabaseCondition.class) - @ImportResource({"classpath:conf/spring/applicationContext-postgres-datasource.xml", - "classpath:conf/spring/applicationContext-dao-postgres.xml"}) - static class PostgresEngineConfig { - } + @Configuration + @Conditional(PostgresDatabaseCondition.class) + @ImportResource({"classpath:conf/spring/applicationContext-postgres-datasource.xml", + "classpath:conf/spring/applicationContext-dao-postgres.xml"}) + static class PostgresEngineConfig { + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java b/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java index 02de12538..bbd7704d1 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/AssumingPostgresEngine.java @@ -24,30 +24,30 @@ public class AssumingPostgresEngine implements TestRule { - private DatabaseEngine dbEngine; - - public AssumingPostgresEngine() {} - - @Override - public Statement apply(Statement base, Description description) { - return new Statement() { - @Override - public void evaluate() throws Throwable { - if (dbEngine == DatabaseEngine.POSTGRES) { - base.evaluate(); - } else { - throw new AssumptionViolatedException("Current database engine is " + dbEngine.toString() - + ", test requires POSTGRES. 
Skipping"); - } - } - }; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } + private DatabaseEngine dbEngine; + + public AssumingPostgresEngine() {} + + @Override + public Statement apply(Statement base, Description description) { + return new Statement() { + @Override + public void evaluate() throws Throwable { + if (dbEngine == DatabaseEngine.POSTGRES) { + base.evaluate(); + } else { + throw new AssumptionViolatedException("Current database engine is " + + dbEngine.toString() + ", test requires POSTGRES. Skipping"); + } + } + }; + } + + public DatabaseEngine getDbEngine() { + return dbEngine; + } + + public void setDbEngine(DatabaseEngine dbEngine) { + this.dbEngine = dbEngine; + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java index 6a4727662..e48ef04b8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/EntityTests.java @@ -27,45 +27,45 @@ */ public class EntityTests extends TestCase { - @Test - public void testEntityEquality() { - Entity a = new Entity("id", "name"); - Entity b = new Entity("id", "name"); - assertEquals(a, b); + @Test + public void testEntityEquality() { + Entity a = new Entity("id", "name"); + Entity b = new Entity("id", "name"); + assertEquals(a, b); - a = new Entity("id", "name"); - b = new Entity("id_a", "name"); - assertFalse(a.equals(b)); + a = new Entity("id", "name"); + b = new Entity("id_a", "name"); + assertFalse(a.equals(b)); - a = new Entity("id", "name"); - b = new Entity("id_a", "name_a"); - assertFalse(a.equals(b)); - } + a = new Entity("id", "name"); + b = new Entity("id_a", "name_a"); + assertFalse(a.equals(b)); + } - @Test - public void testEntityHashCode() { + @Test + public void testEntityHashCode() { - Entity a = new Entity("id", "name"); - Entity b = new Entity("id", "name"); - assertEquals(a.hashCode(), b.hashCode()); + Entity a = new Entity("id", "name"); + Entity b = new Entity("id", "name"); + assertEquals(a.hashCode(), b.hashCode()); - a = new Entity("id", "name"); - b = new Entity("id_a", "name"); - assertFalse(a.hashCode() == b.hashCode()); + a = new Entity("id", "name"); + b = new Entity("id_a", "name"); + assertFalse(a.hashCode() == b.hashCode()); - a = new Entity(); - b = new Entity(); - assertFalse(a.hashCode() == b.hashCode()); - } + a = new Entity(); + b = new Entity(); + assertFalse(a.hashCode() == b.hashCode()); + } - @Test - public void testEntityToString() { - Entity a = new Entity("id", "name"); - Entity b = new Entity("id", "name"); - assertEquals(a.toString(), b.toString()); + @Test + public void testEntityToString() { + Entity a = new Entity("id", "name"); + Entity b = new Entity("id", "name"); + assertEquals(a.toString(), b.toString()); - a = new Entity("id_a", "name"); - b = new Entity("id", "name"); - assertNotSame(a.toString(), b.toString()); - } + a = new Entity("id_a", "name"); + b = new Entity("id", "name"); + assertNotSame(a.toString(), b.toString()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java index 794f6b638..0ec30b313 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java @@ -26,49 +26,49 
@@ import java.util.concurrent.atomic.AtomicBoolean; public final class TestDatabaseSetupPostgres { - private static final String DB_NAME = "postgres"; - private static final String USERNAME = "postgres"; - private static AtomicBoolean setupComplete = new AtomicBoolean(false); - private EmbeddedPostgres postgres; + private static final String DB_NAME = "postgres"; + private static final String USERNAME = "postgres"; + private static AtomicBoolean setupComplete = new AtomicBoolean(false); + private EmbeddedPostgres postgres; - public TestDatabaseSetupPostgres() {} + public TestDatabaseSetupPostgres() {} - public String getUrl() { - return postgres.getJdbcUrl(USERNAME, DB_NAME); - } - - public String getUsername() { - return USERNAME; - } - - public String getPassword() { - return null; - } + public String getUrl() { + return postgres.getJdbcUrl(USERNAME, DB_NAME); + } - public void create() throws Exception { - if (!setupComplete.compareAndSet(false, true)) { - return; + public String getUsername() { + return USERNAME; } - postgres = EmbeddedPostgres.start(); - Flyway flyway = Flyway.configure().dataSource(postgres.getPostgresDatabase()) - .locations("classpath:conf/ddl/postgres/migrations").load(); - flyway.migrate(); + public String getPassword() { + return null; + } - populateTestData(); - } + public void create() throws Exception { + if (!setupComplete.compareAndSet(false, true)) { + return; + } - private void populateTestData() throws Exception { - Connection conn = postgres.getPostgresDatabase().getConnection(); + postgres = EmbeddedPostgres.start(); + Flyway flyway = Flyway.configure().dataSource(postgres.getPostgresDatabase()) + .locations("classpath:conf/ddl/postgres/migrations").load(); + flyway.migrate(); - URL url = Resources.getResource("conf/ddl/postgres/test_data.sql"); - List testDataStatements = Resources.readLines(url, Charsets.UTF_8); - for (String testDataStatement : testDataStatements) { - Statement st = conn.createStatement(); - st.execute(testDataStatement); - st.close(); + populateTestData(); } - conn.close(); - } + private void populateTestData() throws Exception { + Connection conn = postgres.getPostgresDatabase().getConnection(); + + URL url = Resources.getResource("conf/ddl/postgres/test_data.sql"); + List testDataStatements = Resources.readLines(url, Charsets.UTF_8); + for (String testDataStatement : testDataStatements) { + Statement st = conn.createStatement(); + st.execute(testDataStatement); + st.close(); + } + + conn.close(); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java b/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java index a21eb36ff..617337454 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/TransactionalTest.java @@ -27,9 +27,9 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class TransactionalTest extends AbstractTransactionalJUnit4SpringContextTests { - @Test - public void testInit() { + @Test + public void testInit() { - } + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java index adc9e202b..879b52195 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java @@ -56,185 +56,189 @@ 
@ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FrameSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; - - @Resource - JobDao jobDao; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - WhiteboardDao whiteboardDao; - - @Resource - JobManager jobManager; - - @Before - public void launchTestJobs() { - ClassLoader classLoader = getClass().getClassLoader(); - File file = new File(classLoader.getResource("conf/jobspec/jobspec_depend_test.xml").getFile()); - - jobLauncher.testMode = true; - jobLauncher.launch(file); - } - - @Test - @Transactional - @Rollback - public void testGetCriteria() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - FrameSearchCriteria criteria = FrameSearchInterface.criteriaFactory(); - - FrameSearchInterface frameSearch = frameSearchFactory.create(job, criteria); - - assertEquals(criteria, frameSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testSetCriteria() { - FrameSearchCriteria criteria = - FrameSearchInterface.criteriaFactory().toBuilder().setFrameRange("1-10").build(); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - - // Ensure we can distinguish between the default and non-default criteria. - assertNotEquals(criteria, frameSearch.getCriteria()); - - frameSearch.setCriteria(criteria); - - assertEquals(criteria, frameSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrameIds() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameInterface frame1 = frameDao.findFrame(layer, 1); - FrameInterface frame2 = frameDao.findFrame(layer, 2); - frameSearch.filterByFrameIds(ImmutableList.of(frame1.getFrameId(), frame2.getFrameId())); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertThat(frames).containsExactlyInAnyOrder(frame1, frame2); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrame() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameInterface frame1 = frameDao.findFrame(layer, 1); - frameSearch.filterByFrame(frame1); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertThat(frames).containsExactly(frame1); - } - - @Test - @Transactional - @Rollback - public void testFilterByJob() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - String jobId = job.getJobId(); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByJob(job); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertEquals(20, frames.size()); - assertTrue(frames.stream().allMatch(frame -> frame.getJobId().equals(jobId))); - } - - @Test - @Transactional - @Rollback - public void testFilterByLayer() { - JobInterface job = 
jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByLayer(layer); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertTrue(frames.stream().allMatch(frame -> frame.getLayerId().equals(layer.getLayerId()))); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrameStates() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_b"); - LayerInterface layer = layerDao.getLayers(job).get(1); - IntStream.range(1, 11).forEach( - i -> frameDao.updateFrameState(frameDao.findFrame(layer, i), FrameState.SUCCEEDED)); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByFrameStates(ImmutableList.of(FrameState.SUCCEEDED)); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertEquals(10, frames.size()); - assertTrue(frames.stream().allMatch( - frame -> frameDao.getFrameDetail(frame.getFrameId()).state.equals(FrameState.SUCCEEDED))); - } - - @Test - @Transactional - @Rollback - public void testFilterByFrameSet() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - LayerInterface layer = layerDao.getLayers(job).get(0); - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByFrameSet("5-6"); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); - - assertEquals(8, frames.size()); - assertThat(frames.stream().map(frame -> frameDao.getFrameDetail(frame.getFrameId()).number) - .collect(Collectors.toList())).containsOnly(5, 6); - } - - @Test - @Transactional - @Rollback - public void filterByMemoryRange() { - JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); - LayerInterface layer = layerDao.getLayers(job).get(0); - IntStream.range(1, 11).forEach(i -> { - FrameInterface frame = frameDao.findFrame(layer, i); - frameDao.updateFrameState(frame, FrameState.RUNNING); - frameDao.updateFrameMemoryUsageAndLluTime(frame, CueUtil.GB * 5, CueUtil.GB, 0); - }); - - FrameSearchInterface frameSearch = frameSearchFactory.create(); - frameSearch.filterByMemoryRange("4.2-7.1"); - - List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() - .map(frame -> jobManager.getFrameDetail(frame.getId())).collect(Collectors.toList()); - - assertEquals(10, frames.size()); - assertTrue(frames.stream().allMatch(frame -> frame.maxRss == CueUtil.GB * 5)); - } - - // TODO(bcipriano) Add filterByDurationRange and filterByChangeDate tests. 
+ @Resource + JobLauncher jobLauncher; + + @Resource + JobDao jobDao; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + WhiteboardDao whiteboardDao; + + @Resource + JobManager jobManager; + + @Before + public void launchTestJobs() { + ClassLoader classLoader = getClass().getClassLoader(); + File file = + new File(classLoader.getResource("conf/jobspec/jobspec_depend_test.xml").getFile()); + + jobLauncher.testMode = true; + jobLauncher.launch(file); + } + + @Test + @Transactional + @Rollback + public void testGetCriteria() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + FrameSearchCriteria criteria = FrameSearchInterface.criteriaFactory(); + + FrameSearchInterface frameSearch = frameSearchFactory.create(job, criteria); + + assertEquals(criteria, frameSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testSetCriteria() { + FrameSearchCriteria criteria = + FrameSearchInterface.criteriaFactory().toBuilder().setFrameRange("1-10").build(); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + + // Ensure we can distinguish between the default and non-default criteria. + assertNotEquals(criteria, frameSearch.getCriteria()); + + frameSearch.setCriteria(criteria); + + assertEquals(criteria, frameSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrameIds() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameInterface frame1 = frameDao.findFrame(layer, 1); + FrameInterface frame2 = frameDao.findFrame(layer, 2); + frameSearch.filterByFrameIds(ImmutableList.of(frame1.getFrameId(), frame2.getFrameId())); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertThat(frames).containsExactlyInAnyOrder(frame1, frame2); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrame() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameInterface frame1 = frameDao.findFrame(layer, 1); + frameSearch.filterByFrame(frame1); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertThat(frames).containsExactly(frame1); + } + + @Test + @Transactional + @Rollback + public void testFilterByJob() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + String jobId = job.getJobId(); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByJob(job); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertEquals(20, frames.size()); + assertTrue(frames.stream().allMatch(frame -> frame.getJobId().equals(jobId))); + } + + @Test + @Transactional + @Rollback + public void testFilterByLayer() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + 
frameSearch.filterByLayer(layer); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertTrue( + frames.stream().allMatch(frame -> frame.getLayerId().equals(layer.getLayerId()))); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrameStates() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_b"); + LayerInterface layer = layerDao.getLayers(job).get(1); + IntStream.range(1, 11).forEach( + i -> frameDao.updateFrameState(frameDao.findFrame(layer, i), FrameState.SUCCEEDED)); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByFrameStates(ImmutableList.of(FrameState.SUCCEEDED)); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertEquals(10, frames.size()); + assertTrue( + frames.stream().allMatch(frame -> frameDao.getFrameDetail(frame.getFrameId()).state + .equals(FrameState.SUCCEEDED))); + } + + @Test + @Transactional + @Rollback + public void testFilterByFrameSet() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + LayerInterface layer = layerDao.getLayers(job).get(0); + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByFrameSet("5-6"); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrame(frame.getId())).collect(Collectors.toList()); + + assertEquals(8, frames.size()); + assertThat(frames.stream().map(frame -> frameDao.getFrameDetail(frame.getFrameId()).number) + .collect(Collectors.toList())).containsOnly(5, 6); + } + + @Test + @Transactional + @Rollback + public void filterByMemoryRange() { + JobInterface job = jobDao.findJob("pipe-dev.cue-testuser_depend_test_a"); + LayerInterface layer = layerDao.getLayers(job).get(0); + IntStream.range(1, 11).forEach(i -> { + FrameInterface frame = frameDao.findFrame(layer, i); + frameDao.updateFrameState(frame, FrameState.RUNNING); + frameDao.updateFrameMemoryUsageAndLluTime(frame, CueUtil.GB * 5, CueUtil.GB, 0); + }); + + FrameSearchInterface frameSearch = frameSearchFactory.create(); + frameSearch.filterByMemoryRange("4.2-7.1"); + + List frames = whiteboardDao.getFrames(frameSearch).getFramesList().stream() + .map(frame -> jobManager.getFrameDetail(frame.getId())) + .collect(Collectors.toList()); + + assertEquals(10, frames.size()); + assertTrue(frames.stream().allMatch(frame -> frame.maxRss == CueUtil.GB * 5)); + } + + // TODO(bcipriano) Add filterByDurationRange and filterByChangeDate tests. 
} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java index 7a4682152..1d2bac734 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/HostSearchTests.java @@ -46,60 +46,60 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class HostSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - HostSearchFactory hostSearchFactory; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - WhiteboardDao whiteboardDao; - - private AllocationEntity createAlloc(FacilityInterface facility, String allocName) { - AllocationEntity alloc = new AllocationEntity(); - alloc.name = allocName; - alloc.tag = "test-tag"; - adminManager.createAllocation(facility, alloc); - return alloc; - } - - private DispatchHost createHost(AllocationEntity alloc, String hostName) { - DispatchHost host = hostManager - .createHost(RenderHost.newBuilder().setName(hostName).setTotalMem(50000000).build()); - hostManager.setAllocation(host, alloc); - return host; - } - - @Test - @Transactional - @Rollback - public void testGetCriteria() { - HostSearchCriteria criteria = HostSearchInterface.criteriaFactory(); - - HostSearchInterface hostSearch = hostSearchFactory.create(criteria); - - assertEquals(criteria, hostSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testFilterByAlloc() { - FacilityInterface facility = adminManager.createFacility("test-facility"); - AllocationEntity alloc1 = createAlloc(facility, "test-alloc-01"); - AllocationEntity alloc2 = createAlloc(facility, "test-alloc-02"); - DispatchHost expectedHost = createHost(alloc1, "test-host-01"); - createHost(alloc2, "test-host-02"); - HostSearchInterface hostSearch = - hostSearchFactory.create(HostSearchInterface.criteriaFactory()); - hostSearch.filterByAlloc(alloc1); - - List hosts = whiteboardDao.getHosts(hostSearch).getHostsList(); - - assertThat(hosts.stream().map(Host::getId).collect(Collectors.toList())) - .containsOnly(expectedHost.getHostId()); - } + @Resource + HostSearchFactory hostSearchFactory; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + WhiteboardDao whiteboardDao; + + private AllocationEntity createAlloc(FacilityInterface facility, String allocName) { + AllocationEntity alloc = new AllocationEntity(); + alloc.name = allocName; + alloc.tag = "test-tag"; + adminManager.createAllocation(facility, alloc); + return alloc; + } + + private DispatchHost createHost(AllocationEntity alloc, String hostName) { + DispatchHost host = hostManager.createHost( + RenderHost.newBuilder().setName(hostName).setTotalMem(50000000).build()); + hostManager.setAllocation(host, alloc); + return host; + } + + @Test + @Transactional + @Rollback + public void testGetCriteria() { + HostSearchCriteria criteria = HostSearchInterface.criteriaFactory(); + + HostSearchInterface hostSearch = hostSearchFactory.create(criteria); + + assertEquals(criteria, hostSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testFilterByAlloc() { + FacilityInterface facility = adminManager.createFacility("test-facility"); + AllocationEntity alloc1 = createAlloc(facility, "test-alloc-01"); + AllocationEntity alloc2 = createAlloc(facility, 
"test-alloc-02"); + DispatchHost expectedHost = createHost(alloc1, "test-host-01"); + createHost(alloc2, "test-host-02"); + HostSearchInterface hostSearch = + hostSearchFactory.create(HostSearchInterface.criteriaFactory()); + hostSearch.filterByAlloc(alloc1); + + List hosts = whiteboardDao.getHosts(hostSearch).getHostsList(); + + assertThat(hosts.stream().map(Host::getId).collect(Collectors.toList())) + .containsOnly(expectedHost.getHostId()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java index 46d2edfc2..e207222ea 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/JobSearchTests.java @@ -43,68 +43,68 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobSearchFactory jobSearchFactory; + @Resource + JobSearchFactory jobSearchFactory; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - WhiteboardDao whiteboardDao; + @Resource + WhiteboardDao whiteboardDao; - @Resource - ShowDao showDao; + @Resource + ShowDao showDao; - @Before - public void launchTestJobs() { - ClassLoader classLoader = getClass().getClassLoader(); - jobLauncher.testMode = true; + @Before + public void launchTestJobs() { + ClassLoader classLoader = getClass().getClassLoader(); + jobLauncher.testMode = true; - File file = new File(classLoader.getResource("conf/jobspec/jobspec.xml").getFile()); - jobLauncher.launch(file); + File file = new File(classLoader.getResource("conf/jobspec/jobspec.xml").getFile()); + jobLauncher.launch(file); - file = new File(classLoader.getResource("conf/jobspec/jobspec_other_show.xml").getFile()); - jobLauncher.launch(file); - } + file = new File(classLoader.getResource("conf/jobspec/jobspec_other_show.xml").getFile()); + jobLauncher.launch(file); + } - @Test - @Transactional - @Rollback - public void testGetCriteria() { - JobSearchCriteria criteria = JobSearchInterface.criteriaFactory(); + @Test + @Transactional + @Rollback + public void testGetCriteria() { + JobSearchCriteria criteria = JobSearchInterface.criteriaFactory(); - JobSearchInterface jobSearch = jobSearchFactory.create(criteria); + JobSearchInterface jobSearch = jobSearchFactory.create(criteria); - assertEquals(criteria, jobSearch.getCriteria()); - } + assertEquals(criteria, jobSearch.getCriteria()); + } - @Test - @Transactional - @Rollback - public void testSetCriteria() { - JobSearchCriteria criteria = - JobSearchInterface.criteriaFactory().toBuilder().addIds("fake-job-id").build(); - JobSearchInterface jobSearch = jobSearchFactory.create(); + @Test + @Transactional + @Rollback + public void testSetCriteria() { + JobSearchCriteria criteria = + JobSearchInterface.criteriaFactory().toBuilder().addIds("fake-job-id").build(); + JobSearchInterface jobSearch = jobSearchFactory.create(); - // Ensure we can distinguish between the default and non-default criteria. - assertNotEquals(criteria, jobSearch.getCriteria()); + // Ensure we can distinguish between the default and non-default criteria. 
+ assertNotEquals(criteria, jobSearch.getCriteria()); - jobSearch.setCriteria(criteria); + jobSearch.setCriteria(criteria); - assertEquals(criteria, jobSearch.getCriteria()); - } + assertEquals(criteria, jobSearch.getCriteria()); + } - @Test - @Transactional - @Rollback - public void testFilterByShow() { - JobSearchCriteria criteria = - JobSearchInterface.criteriaFactory().toBuilder().setIncludeFinished(true).build(); - JobSearchInterface jobSearch = jobSearchFactory.create(criteria); - jobSearch.filterByShow(showDao.findShowDetail("pipe")); + @Test + @Transactional + @Rollback + public void testFilterByShow() { + JobSearchCriteria criteria = + JobSearchInterface.criteriaFactory().toBuilder().setIncludeFinished(true).build(); + JobSearchInterface jobSearch = jobSearchFactory.create(criteria); + jobSearch.filterByShow(showDao.findShowDetail("pipe")); - List jobs = whiteboardDao.getJobs(jobSearch).getJobsList(); + List jobs = whiteboardDao.getJobs(jobSearch).getJobsList(); - assertEquals(1, jobs.size()); - } + assertEquals(1, jobs.size()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java index 45c4f462b..1f6f384ea 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java @@ -58,169 +58,170 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ProcSearchTests extends AbstractTransactionalJUnit4SpringContextTests { - private static final String FIRST_HOST = "beta01"; - private static final String SECOND_HOST = "beta02"; - private static final String FIRST_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String SECOND_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - private static final String DEFAULT_GROUP_NAME = "pipe"; - private static final String NEW_GROUP_NAME = "arbitrary-group-name"; + private static final String FIRST_HOST = "beta01"; + private static final String SECOND_HOST = "beta02"; + private static final String FIRST_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String SECOND_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String DEFAULT_GROUP_NAME = "pipe"; + private static final String NEW_GROUP_NAME = "arbitrary-group-name"; - @Resource - ProcSearchFactory procSearchFactory; + @Resource + ProcSearchFactory procSearchFactory; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - WhiteboardDao whiteboardDao; + @Resource + WhiteboardDao whiteboardDao; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } - @Test - @Transactional - @Rollback - public void testGetCriteria() { - ProcSearchCriteria criteria = ProcSearchInterface.criteriaFactory(); + @Test + @Transactional + @Rollback + public void testGetCriteria() { + ProcSearchCriteria criteria = 
ProcSearchInterface.criteriaFactory(); - ProcSearchInterface procSearch = procSearchFactory.create(criteria); - - assertEquals(criteria, procSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testSetCriteria() { - ProcSearchCriteria criteria = - ProcSearchInterface.criteriaFactory().toBuilder().addHosts("test-host").build(); - ProcSearchInterface procSearch = procSearchFactory.create(); - - // Ensure we can distinguish between the default and non-default criteria. - assertNotEquals(criteria, procSearch.getCriteria()); - - procSearch.setCriteria(criteria); - - assertEquals(criteria, procSearch.getCriteria()); - } - - @Test - @Transactional - @Rollback - public void testNotJobs() { - createHostsJobsAndProcs(); - - JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); - ProcSearchInterface procSearch = procSearchFactory.create(); - procSearch.notJobs(ImmutableList.of(firstJob)); - - List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); - - assertEquals(1, foundProcs.size()); - assertThat(foundProcs.stream().map(Proc::getJobName).collect(Collectors.toList())) - .containsOnly(SECOND_JOB); - } - - @Test - @Transactional - @Rollback - public void testNotGroups() { - createHostsJobsAndProcs(); - - JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); - GroupDetail newGroup = createGroup(whiteboardDao.getShow(firstJob.getShowId())); - Inherit[] emptyInherits = {}; - groupManager.reparentJob(firstJob, newGroup, emptyInherits); - - ProcSearchInterface procSearch = procSearchFactory.create(); - procSearch.notGroups(ImmutableList.of(newGroup)); - - List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); - - assertEquals(1, foundProcs.size()); - assertThat(foundProcs.stream().map(Proc::getGroupName).collect(Collectors.toList())) - .containsOnly(DEFAULT_GROUP_NAME); - } - - @Test - @Transactional - @Rollback - public void testFilterByHost() { - createHostsJobsAndProcs(); - - ProcSearchInterface procSearch = procSearchFactory.create(); - procSearch.filterByHost(hostManager.findDispatchHost(FIRST_HOST)); - - List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); - - assertEquals(1, foundProcs.size()); - assertThat(foundProcs.stream().map(proc -> hostManager.getVirtualProc(proc.getId()).hostName) - .collect(Collectors.toList())).containsOnly(FIRST_HOST); - } - - // TODO: test by duration range - - private void createHostsJobsAndProcs() { - createHosts(); - launchJobs(); - - DispatchHost firstHost = hostManager.findDispatchHost(FIRST_HOST); - DispatchHost secondHost = hostManager.findDispatchHost(SECOND_HOST); - JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); - JobDetail secondJob = jobManager.findJobDetail(SECOND_JOB); - - dispatcher.dispatchHost(firstHost, firstJob); - dispatcher.dispatchHost(secondHost, secondJob); - } - - private void launchJobs() { - ClassLoader classLoader = getClass().getClassLoader(); - jobLauncher.testMode = true; - File file = - new File(classLoader.getResource("conf/jobspec/jobspec_dispatch_test.xml").getFile()); - jobLauncher.launch(file); - } - - private RenderHost.Builder buildRenderHost() { - return RenderHost.newBuilder().setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(1).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux"); - } - - private void createHosts() { - RenderHost host1 = buildRenderHost().setName(FIRST_HOST).build(); - RenderHost host2 = buildRenderHost().setName(SECOND_HOST).build(); - - hostManager.createHost(host1, adminManager.findAllocationDetail("spi", "general")); - hostManager.createHost(host2, adminManager.findAllocationDetail("spi", "general")); - } - - private GroupDetail createGroup(Show show) { - GroupDetail newGroupDetail = new GroupDetail(); - newGroupDetail.name = NEW_GROUP_NAME; - newGroupDetail.showId = show.getId(); - groupManager.createGroup(newGroupDetail, null); - return groupManager - .getGroupDetail(whiteboardDao.findGroup(show.getName(), NEW_GROUP_NAME).getId()); - } + ProcSearchInterface procSearch = procSearchFactory.create(criteria); + + assertEquals(criteria, procSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testSetCriteria() { + ProcSearchCriteria criteria = + ProcSearchInterface.criteriaFactory().toBuilder().addHosts("test-host").build(); + ProcSearchInterface procSearch = procSearchFactory.create(); + + // Ensure we can distinguish between the default and non-default criteria. + assertNotEquals(criteria, procSearch.getCriteria()); + + procSearch.setCriteria(criteria); + + assertEquals(criteria, procSearch.getCriteria()); + } + + @Test + @Transactional + @Rollback + public void testNotJobs() { + createHostsJobsAndProcs(); + + JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); + ProcSearchInterface procSearch = procSearchFactory.create(); + procSearch.notJobs(ImmutableList.of(firstJob)); + + List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); + + assertEquals(1, foundProcs.size()); + assertThat(foundProcs.stream().map(Proc::getJobName).collect(Collectors.toList())) + .containsOnly(SECOND_JOB); + } + + @Test + @Transactional + @Rollback + public void testNotGroups() { + createHostsJobsAndProcs(); + + JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); + GroupDetail newGroup = createGroup(whiteboardDao.getShow(firstJob.getShowId())); + Inherit[] emptyInherits = {}; + groupManager.reparentJob(firstJob, newGroup, emptyInherits); + + ProcSearchInterface procSearch = procSearchFactory.create(); + procSearch.notGroups(ImmutableList.of(newGroup)); + + List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); + + assertEquals(1, foundProcs.size()); + assertThat(foundProcs.stream().map(Proc::getGroupName).collect(Collectors.toList())) + .containsOnly(DEFAULT_GROUP_NAME); + } + + @Test + @Transactional + @Rollback + public void testFilterByHost() { + createHostsJobsAndProcs(); + + ProcSearchInterface procSearch = procSearchFactory.create(); + procSearch.filterByHost(hostManager.findDispatchHost(FIRST_HOST)); + + List foundProcs = whiteboardDao.getProcs(procSearch).getProcsList(); + + assertEquals(1, foundProcs.size()); + assertThat( + foundProcs.stream().map(proc -> hostManager.getVirtualProc(proc.getId()).hostName) + .collect(Collectors.toList())).containsOnly(FIRST_HOST); + } + + // TODO: test by duration range + + private void createHostsJobsAndProcs() { + createHosts(); + launchJobs(); + + DispatchHost firstHost = hostManager.findDispatchHost(FIRST_HOST); + DispatchHost secondHost = 
hostManager.findDispatchHost(SECOND_HOST); + JobDetail firstJob = jobManager.findJobDetail(FIRST_JOB); + JobDetail secondJob = jobManager.findJobDetail(SECOND_JOB); + + dispatcher.dispatchHost(firstHost, firstJob); + dispatcher.dispatchHost(secondHost, secondJob); + } + + private void launchJobs() { + ClassLoader classLoader = getClass().getClassLoader(); + jobLauncher.testMode = true; + File file = new File( + classLoader.getResource("conf/jobspec/jobspec_dispatch_test.xml").getFile()); + jobLauncher.launch(file); + } + + private RenderHost.Builder buildRenderHost() { + return RenderHost.newBuilder().setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux"); + } + + private void createHosts() { + RenderHost host1 = buildRenderHost().setName(FIRST_HOST).build(); + RenderHost host2 = buildRenderHost().setName(SECOND_HOST).build(); + + hostManager.createHost(host1, adminManager.findAllocationDetail("spi", "general")); + hostManager.createHost(host2, adminManager.findAllocationDetail("spi", "general")); + } + + private GroupDetail createGroup(Show show) { + GroupDetail newGroupDetail = new GroupDetail(); + newGroupDetail.name = NEW_GROUP_NAME; + newGroupDetail.showId = show.getId(); + groupManager.createGroup(newGroupDetail, null); + return groupManager + .getGroupDetail(whiteboardDao.findGroup(show.getName(), NEW_GROUP_NAME).getId()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java index ce40a5749..cc8a70265 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ActionDaoTests.java @@ -46,153 +46,153 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ActionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - ActionDao actionDao; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - GroupDao groupDao; - - @Resource - JobManager jobManager; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public FilterEntity buildFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.booleanValue = true; - a1.valueType = ActionValueType.BOOLEAN_TYPE; - actionDao.createAction(a1); - - ActionEntity a2 = new ActionEntity(); - a2.type = ActionType.MOVE_JOB_TO_GROUP; - a2.filterId = f.getFilterId(); - a2.groupValue = groupDao.getRootGroupId(getShow()); - a2.valueType = ActionValueType.GROUP_TYPE; - 
actionDao.createAction(a2); - - ActionEntity a3 = new ActionEntity(); - a3.type = ActionType.SET_JOB_MAX_CORES; - a3.filterId = f.getFilterId(); - a3.floatValue = 1f; - a3.valueType = ActionValueType.FLOAT_TYPE; - actionDao.createAction(a3); - - ActionEntity a4 = new ActionEntity(); - a4.type = ActionType.SET_JOB_MIN_CORES; - a4.filterId = f.getFilterId(); - a4.floatValue = 1; - a4.valueType = ActionValueType.FLOAT_TYPE; - actionDao.createAction(a4); - - ActionEntity a5 = new ActionEntity(); - a5.type = ActionType.STOP_PROCESSING; - a5.filterId = f.getFilterId(); - a5.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a5); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAction() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - actionDao.deleteAction(a); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - actionDao.getAction(a); - actionDao.getAction(a.getActionId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.name = null; - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - - a.floatValue = 1f; - a.type = ActionType.SET_JOB_MIN_CORES; - a.valueType = ActionValueType.FLOAT_TYPE; - - actionDao.updateAction(a); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT float_value FROM action WHERE pk_action=?", Integer.class, a.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActions() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.name = null; - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - - actionDao.getActions(f); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + ActionDao actionDao; + + @Resource + FilterDao filterDao; + + @Resource + ShowDao showDao; + + @Resource + GroupDao groupDao; + + @Resource + JobManager jobManager; + + private static String FILTER_NAME = "test_filter"; + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public FilterEntity buildFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = "00000000-0000-0000-0000-000000000000"; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testCreateAction() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.PAUSE_JOB; + a1.filterId = f.getFilterId(); + a1.booleanValue = true; + a1.valueType = ActionValueType.BOOLEAN_TYPE; + actionDao.createAction(a1); + + ActionEntity a2 = new ActionEntity(); + a2.type = ActionType.MOVE_JOB_TO_GROUP; + a2.filterId = 
f.getFilterId(); + a2.groupValue = groupDao.getRootGroupId(getShow()); + a2.valueType = ActionValueType.GROUP_TYPE; + actionDao.createAction(a2); + + ActionEntity a3 = new ActionEntity(); + a3.type = ActionType.SET_JOB_MAX_CORES; + a3.filterId = f.getFilterId(); + a3.floatValue = 1f; + a3.valueType = ActionValueType.FLOAT_TYPE; + actionDao.createAction(a3); + + ActionEntity a4 = new ActionEntity(); + a4.type = ActionType.SET_JOB_MIN_CORES; + a4.filterId = f.getFilterId(); + a4.floatValue = 1; + a4.valueType = ActionValueType.FLOAT_TYPE; + actionDao.createAction(a4); + + ActionEntity a5 = new ActionEntity(); + a5.type = ActionType.STOP_PROCESSING; + a5.filterId = f.getFilterId(); + a5.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a5); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteAction() { + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + actionDao.deleteAction(a); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAction() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + actionDao.getAction(a); + actionDao.getAction(a.getActionId()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAction() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.name = null; + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + + a.floatValue = 1f; + a.type = ActionType.SET_JOB_MIN_CORES; + a.valueType = ActionValueType.FLOAT_TYPE; + + actionDao.updateAction(a); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT float_value FROM action WHERE pk_action=?", Integer.class, a.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetActions() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a = new ActionEntity(); + a.type = ActionType.STOP_PROCESSING; + a.filterId = f.getFilterId(); + a.name = null; + a.valueType = ActionValueType.NONE_TYPE; + actionDao.createAction(a); + + actionDao.getActions(f); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java index f0f07a78e..2c8acff29 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java @@ -44,149 +44,149 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class AllocationDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - AllocationDao allocDao; - - @Resource - FacilityDao facilityDao; - - @Resource - AdminManager adminManager; - - public static final String ALLOC_FQN = "spi.test_alloc"; - public static final String ALLOC_NAME = "test_alloc"; - public static final String ALLOC_TAG = "test"; - - private AllocationEntity alloc; - - @Before - 
public void before() { - - alloc = new AllocationEntity(); - alloc.name = ALLOC_NAME; - alloc.tag = ALLOC_TAG; - - allocDao.insertAllocation(facilityDao.getFacility("spi"), alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocation() { - allocDao.getAllocationEntity(alloc.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation() { - FacilityInterface f = facilityDao.getFacility("spi"); - allocDao.findAllocationEntity(f.getName(), ALLOC_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation2() { - FacilityInterface f = facilityDao.getFacility("spi"); - allocDao.findAllocationEntity(ALLOC_FQN); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAllocation() { - allocDao.deleteAllocation(alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAllocationWithProc() { - - // Use the alloc so deleting triggers it just to be disaled. - ShowEntity show = adminManager.getShowEntity("00000000-0000-0000-0000-000000000000"); - adminManager.createSubscription(show, alloc, 10, 10); - allocDao.deleteAllocation(alloc); - - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = false", Integer.class, - alloc.getAllocationId())); - - assertEquals(ALLOC_FQN, - jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=? AND b_enabled = false", String.class, - alloc.getAllocationId())); - - // Now re-enable it. - allocDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = true", Integer.class, - alloc.getAllocationId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationName() { - allocDao.updateAllocationName(alloc, "frickjack"); - assertEquals("spi.frickjack", jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); - } - - @Test(expected = IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateAllocationNameBad() { - allocDao.updateAllocationName(alloc, "spi.frickjack"); - assertEquals("spi.frickjack", jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationTag() { - allocDao.updateAllocationTag(alloc, "foo"); - assertEquals("foo", jdbcTemplate.queryForObject("SELECT str_tag FROM alloc WHERE pk_alloc=?", - String.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationBillable() { - allocDao.updateAllocationBillable(alloc, false); - - assertFalse(jdbcTemplate.queryForObject("SELECT b_billable FROM alloc WHERE pk_alloc=?", - Boolean.class, alloc.getId())); - - allocDao.updateAllocationBillable(alloc, true); - - assertTrue(jdbcTemplate.queryForObject("SELECT b_billable FROM alloc WHERE pk_alloc=?", - Boolean.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testSetDefaultAllocation() { - AllocationEntity newAlloc = new AllocationEntity(); - newAlloc.name = "spi.new_alloc"; - newAlloc.tag = "new_alloc"; - allocDao.insertAllocation(facilityDao.getFacility("spi"), newAlloc); - - allocDao.setDefaultAllocation(newAlloc); - AllocationEntity defaultAlloc = allocDao.getDefaultAllocationEntity(); - 
assertEquals(newAlloc.getAllocationId(), defaultAlloc.getAllocationId()); - assertEquals(newAlloc.name, defaultAlloc.name); - assertEquals(newAlloc.tag, defaultAlloc.tag); - assertEquals(facilityDao.getFacility("spi").getFacilityId(), defaultAlloc.getFacilityId()); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + AllocationDao allocDao; + + @Resource + FacilityDao facilityDao; + + @Resource + AdminManager adminManager; + + public static final String ALLOC_FQN = "spi.test_alloc"; + public static final String ALLOC_NAME = "test_alloc"; + public static final String ALLOC_TAG = "test"; + + private AllocationEntity alloc; + + @Before + public void before() { + + alloc = new AllocationEntity(); + alloc.name = ALLOC_NAME; + alloc.tag = ALLOC_TAG; + + allocDao.insertAllocation(facilityDao.getFacility("spi"), alloc); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAllocation() { + allocDao.getAllocationEntity(alloc.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindAllocation() { + FacilityInterface f = facilityDao.getFacility("spi"); + allocDao.findAllocationEntity(f.getName(), ALLOC_NAME); + } + + @Test + @Transactional + @Rollback(true) + public void testFindAllocation2() { + FacilityInterface f = facilityDao.getFacility("spi"); + allocDao.findAllocationEntity(ALLOC_FQN); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteAllocation() { + allocDao.deleteAllocation(alloc); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteAllocationWithProc() { + + // Use the alloc so deleting triggers it just to be disaled. + ShowEntity show = adminManager.getShowEntity("00000000-0000-0000-0000-000000000000"); + adminManager.createSubscription(show, alloc, 10, 10); + allocDao.deleteAllocation(alloc); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = false", + Integer.class, alloc.getAllocationId())); + + assertEquals(ALLOC_FQN, + jdbcTemplate.queryForObject( + "SELECT str_name FROM alloc WHERE pk_alloc=? AND b_enabled = false", + String.class, alloc.getAllocationId())); + + // Now re-enable it. + allocDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? 
AND b_enabled = true", + Integer.class, alloc.getAllocationId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAllocationName() { + allocDao.updateAllocationName(alloc, "frickjack"); + assertEquals("spi.frickjack", jdbcTemplate.queryForObject( + "SELECT str_name FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); + } + + @Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testUpdateAllocationNameBad() { + allocDao.updateAllocationName(alloc, "spi.frickjack"); + assertEquals("spi.frickjack", jdbcTemplate.queryForObject( + "SELECT str_name FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAllocationTag() { + allocDao.updateAllocationTag(alloc, "foo"); + assertEquals("foo", jdbcTemplate.queryForObject( + "SELECT str_tag FROM alloc WHERE pk_alloc=?", String.class, alloc.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateAllocationBillable() { + allocDao.updateAllocationBillable(alloc, false); + + assertFalse(jdbcTemplate.queryForObject("SELECT b_billable FROM alloc WHERE pk_alloc=?", + Boolean.class, alloc.getId())); + + allocDao.updateAllocationBillable(alloc, true); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_billable FROM alloc WHERE pk_alloc=?", + Boolean.class, alloc.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testSetDefaultAllocation() { + AllocationEntity newAlloc = new AllocationEntity(); + newAlloc.name = "spi.new_alloc"; + newAlloc.tag = "new_alloc"; + allocDao.insertAllocation(facilityDao.getFacility("spi"), newAlloc); + + allocDao.setDefaultAllocation(newAlloc); + AllocationEntity defaultAlloc = allocDao.getDefaultAllocationEntity(); + assertEquals(newAlloc.getAllocationId(), defaultAlloc.getAllocationId()); + assertEquals(newAlloc.name, defaultAlloc.name); + assertEquals(newAlloc.tag, defaultAlloc.tag); + assertEquals(facilityDao.getFacility("spi").getFacilityId(), defaultAlloc.getFacilityId()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java index fe173ba3d..bf5ffbf9c 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java @@ -56,353 +56,369 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class BookingDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + HostManager hostManager; - @Resource - HostManager hostManager; + @Resource + AdminManager adminManager; + + @Resource + JobLauncher jobLauncher; - @Resource - AdminManager adminManager; + @Resource + JobManager jobManager; + + @Resource + HostDao hostDao; - @Resource - JobLauncher jobLauncher; + @Resource + BookingDao bookingDao; - @Resource - JobManager jobManager; + @Resource + DispatcherDao dispatcherDao; - @Resource - HostDao hostDao; + @Resource + ProcDao procDao; - @Resource - BookingDao bookingDao; + @Resource + Whiteboard whiteboard; - @Resource - DispatcherDao dispatcherDao; + public DispatchHost createHost() { + RenderHost host = 
RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(false).setNumProcs(2) + .setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .addTags("general").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - @Resource - ProcDao procDao; + return dh; + } - @Resource - Whiteboard whiteboard; + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.setJobPaused(d, false); + return d; + } - public DispatchHost createHost() { - RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").addTags("general").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + @Test + @Transactional + @Rollback(true) + public void insertLocalJobAssignment() { - return dh; - } + DispatchHost h = createHost(); + JobDetail j = launchJob(); - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(1); + lja.setThreads(2); + + bookingDao.insertLocalHostAssignment(h, j, lja); - @Test - @Transactional - @Rollback(true) - public void insertLocalJobAssignment() { + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(1); - lja.setThreads(2); + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_cores_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - bookingDao.insertLocalHostAssignment(h, j, lja); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + 
assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_cores_idle FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + } - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + @Test + @Transactional + @Rollback(true) + public void insertLocalLayerAssignment() { - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + DispatchHost h = createHost(); + JobDetail j = launchJob(); + LayerInterface layer = jobManager.getLayers(j).get(0); - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(1); + lja.setThreads(2); - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - } + bookingDao.insertLocalHostAssignment(h, layer, lja); - @Test - @Transactional - @Rollback(true) - public void insertLocalLayerAssignment() { + assertEquals(layer.getLayerId(), + jdbcTemplate.queryForObject("SELECT pk_layer FROM host_local WHERE pk_host_local=?", + String.class, lja.getId())); - DispatchHost h = createHost(); - JobDetail j = launchJob(); - LayerInterface layer = jobManager.getLayers(j).get(0); + assertEquals(RenderPartitionType.LAYER_PARTITION.toString(), + jdbcTemplate.queryForObject("SELECT str_type FROM host_local WHERE pk_host_local=?", + String.class, lja.getId())); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(1); - lja.setThreads(2); + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - bookingDao.insertLocalHostAssignment(h, layer, lja); + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_cores_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); + + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - assertEquals(layer.getLayerId(), jdbcTemplate.queryForObject( - "SELECT pk_layer FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_cores_idle FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - 
assertEquals(RenderPartitionType.LAYER_PARTITION.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + } - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + @Test + @Transactional + @Rollback(true) + public void insertLocalFrameAssignment() { - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + DispatchHost h = createHost(); + JobDetail j = launchJob(); + LayerInterface layer = jobManager.getLayers(j).get(0); + FrameInterface frame = jobManager.findFrame(layer, 1); - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(1); + lja.setThreads(2); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + bookingDao.insertLocalHostAssignment(h, frame, lja); - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + assertEquals(frame.getFrameId(), + jdbcTemplate.queryForObject("SELECT pk_frame FROM host_local WHERE pk_host_local=?", + String.class, lja.getId())); - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - } + assertEquals(RenderPartitionType.FRAME_PARTITION.toString(), + jdbcTemplate.queryForObject("SELECT str_type FROM host_local WHERE pk_host_local=?", + String.class, lja.getId())); - @Test - @Transactional - @Rollback(true) - public void insertLocalFrameAssignment() { + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); - DispatchHost h = createHost(); - JobDetail j = launchJob(); - LayerInterface layer = jobManager.getLayers(j).get(0); - FrameInterface frame = jobManager.findFrame(layer, 1); + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_cores_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(1); - lja.setThreads(2); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - bookingDao.insertLocalHostAssignment(h, frame, lja); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - assertEquals(frame.getFrameId(), jdbcTemplate.queryForObject( - "SELECT pk_frame FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_cores_idle FROM host_local WHERE pk_job=?", + Integer.class, j.getJobId())); - assertEquals(RenderPartitionType.FRAME_PARTITION.toString(), 
jdbcTemplate.queryForObject( - "SELECT str_type FROM host_local WHERE pk_host_local=?", String.class, lja.getId())); + assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( + "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + } - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + @Test + @Transactional + @Rollback(true) + public void testGetLocalJobAssignment() { - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + DispatchHost h = createHost(); + JobDetail j = launchJob(); - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + bookingDao.insertLocalHostAssignment(h, j, lja); - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); + LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), j.getJobId()); - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", Long.class, j.getJobId())); - } + assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); + assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); + assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); + assertEquals(lja.getThreads(), lja2.getThreads()); - @Test - @Transactional - @Rollback(true) - public void testGetLocalJobAssignment() { + } - DispatchHost h = createHost(); - JobDetail j = launchJob(); + @Test + @Transactional + @Rollback(true) + public void testGetRenderPartition() { - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + DispatchHost h = createHost(); + JobDetail j = launchJob(); - bookingDao.insertLocalHostAssignment(h, j, lja); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); - LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), j.getJobId()); + bookingDao.insertLocalHostAssignment(h, j, lja); - assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); - assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); - assertEquals(lja.getThreads(), lja2.getThreads()); + LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), j.getJobId()); - } + assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); + assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); + assertEquals(lja.getThreads(), lja2.getThreads()); + assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); - @Test - @Transactional - @Rollback(true) - public void testGetRenderPartition() { + RenderPartition rp = whiteboard.getRenderPartition(lja2); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + assertEquals(lja2.getMaxCoreUnits(), rp.getMaxCores()); + 
assertEquals(lja2.getMaxMemory(), rp.getMaxMemory()); + assertEquals(lja2.getThreads(), rp.getThreads()); + logger.info("--------------------"); + logger.info(lja2.getMaxGpuMemory()); + logger.info(rp.getMaxGpuMemory()); + assertEquals(lja2.getMaxGpuMemory(), rp.getMaxGpuMemory()); + assertEquals(h.getName(), rp.getHost()); + assertEquals(j.getName(), rp.getJob()); + } - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + @Test + @Transactional + @Rollback(true) + public void testGetProcs() { - bookingDao.insertLocalHostAssignment(h, j, lja); + DispatchHost h = createHost(); + JobDetail j = launchJob(); - LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), j.getJobId()); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); - assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); - assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getThreads(), lja2.getThreads()); - assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); + bookingDao.insertLocalHostAssignment(h, j, lja); - RenderPartition rp = whiteboard.getRenderPartition(lja2); + assertEquals(0, procDao.findVirtualProcs(lja).size()); + } - assertEquals(lja2.getMaxCoreUnits(), rp.getMaxCores()); - assertEquals(lja2.getMaxMemory(), rp.getMaxMemory()); - assertEquals(lja2.getThreads(), rp.getThreads()); - logger.info("--------------------"); - logger.info(lja2.getMaxGpuMemory()); - logger.info(rp.getMaxGpuMemory()); - assertEquals(lja2.getMaxGpuMemory(), rp.getMaxGpuMemory()); - assertEquals(h.getName(), rp.getHost()); - assertEquals(j.getName(), rp.getJob()); - } + @Test + @Transactional + @Rollback(true) + public void updateMaxCores() { - @Test - @Transactional - @Rollback(true) - public void testGetProcs() { + DispatchHost h = createHost(); + JobDetail j = launchJob(); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + bookingDao.insertLocalHostAssignment(h, j, lja); + assertTrue(bookingDao.updateMaxCores(lja, 100)); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_cores_max FROM host_local WHERE pk_host=?", + Integer.class, h.getHostId())); - bookingDao.insertLocalHostAssignment(h, j, lja); + LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - assertEquals(0, procDao.findVirtualProcs(lja).size()); - } + assertEquals(100, lj2.getIdleCoreUnits()); + assertEquals(100, lj2.getMaxCoreUnits()); - @Test - @Transactional - @Rollback(true) - public void updateMaxCores() { + bookingDao.updateMaxCores(lja, 200); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + lj2 = bookingDao.getLocalJobAssignment(lja.id); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + assertEquals(200, lj2.getIdleCoreUnits()); + assertEquals(200, lj2.getMaxCoreUnits()); + } - bookingDao.insertLocalHostAssignment(h, j, lja); - assertTrue(bookingDao.updateMaxCores(lja, 100)); - 
assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_host=?", Integer.class, h.getHostId())); + @Test + @Transactional + @Rollback(true) + public void updateMaxMemory() { - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + DispatchHost h = createHost(); + JobDetail j = launchJob(); - assertEquals(100, lj2.getIdleCoreUnits()); - assertEquals(100, lj2.getMaxCoreUnits()); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); - bookingDao.updateMaxCores(lja, 200); + bookingDao.insertLocalHostAssignment(h, j, lja); + bookingDao.updateMaxMemory(lja, CueUtil.GB2); - lj2 = bookingDao.getLocalJobAssignment(lja.id); + LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - assertEquals(200, lj2.getIdleCoreUnits()); - assertEquals(200, lj2.getMaxCoreUnits()); - } + assertEquals(CueUtil.GB2, lj2.getIdleMemory()); + assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - @Test - @Transactional - @Rollback(true) - public void updateMaxMemory() { + bookingDao.updateMaxMemory(lja, CueUtil.GB4); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + lj2 = bookingDao.getLocalJobAssignment(lja.id); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); + assertEquals(CueUtil.GB4, lj2.getIdleMemory()); + assertEquals(CueUtil.GB4, lj2.getMaxMemory()); + } - bookingDao.insertLocalHostAssignment(h, j, lja); - bookingDao.updateMaxMemory(lja, CueUtil.GB2); + @Test + @Transactional + @Rollback(true) + public void updateMaxGpuMemory() { - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); + DispatchHost h = createHost(); + JobDetail j = launchJob(); - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + lja.setMaxGpuMemory(1); - bookingDao.updateMaxMemory(lja, CueUtil.GB4); + bookingDao.insertLocalHostAssignment(h, j, lja); + bookingDao.updateMaxMemory(lja, CueUtil.GB2); - lj2 = bookingDao.getLocalJobAssignment(lja.id); + LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - assertEquals(CueUtil.GB4, lj2.getIdleMemory()); - assertEquals(CueUtil.GB4, lj2.getMaxMemory()); - } + assertEquals(CueUtil.GB2, lj2.getIdleMemory()); + assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + assertEquals(1, lj2.getMaxGpuMemory()); - @Test - @Transactional - @Rollback(true) - public void updateMaxGpuMemory() { + bookingDao.updateMaxGpuMemory(lja, 2); - DispatchHost h = createHost(); - JobDetail j = launchJob(); + lj2 = bookingDao.getLocalJobAssignment(lja.id); - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpuMemory(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - bookingDao.updateMaxMemory(lja, CueUtil.GB2); - - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(1, lj2.getMaxGpuMemory()); - - bookingDao.updateMaxGpuMemory(lja, 2); - - lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, 
lj2.getMaxMemory()); - assertEquals(2, lj2.getMaxGpuMemory()); - } + assertEquals(CueUtil.GB2, lj2.getIdleMemory()); + assertEquals(CueUtil.GB2, lj2.getMaxMemory()); + assertEquals(2, lj2.getMaxGpuMemory()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java index 5bdfd6544..7f2d7ef2d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java @@ -48,174 +48,174 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class CommentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - CommentDao commentDao; + @Resource + CommentDao commentDao; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Before - public void testMode() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - } + @Before + public void testMode() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + } - @Test - @Transactional - @Rollback(true) - public void testDeleteComment() { + @Test + @Transactional + @Rollback(true) + public void testDeleteComment() { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - commentDao.insertComment(job, d); - commentDao.deleteComment(d.getId()); - } + commentDao.insertComment(job, d); + commentDao.deleteComment(d.getId()); + } - @Test - @Transactional - @Rollback(true) - public void testGetComment() { + @Test + @Transactional + @Rollback(true) + public void testGetComment() { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - commentDao.insertComment(job, d); + commentDao.insertComment(job, d); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals(d.message, nd.message); - assertEquals(d.subject, nd.subject); - assertEquals(d.user, nd.user); - } + assertEquals(d.message, nd.message); + assertEquals(d.subject, nd.subject); + assertEquals(d.user, nd.user); + } - @Test - @Transactional - @Rollback(true) - public void testInsertCommentOnJob() { + @Test + @Transactional + @Rollback(true) + public void testInsertCommentOnJob() { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - 
CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - commentDao.insertComment(job, d); + commentDao.insertComment(job, d); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals(d.message, nd.message); - assertEquals(d.subject, nd.subject); - assertEquals(d.user, nd.user); - } + assertEquals(d.message, nd.message); + assertEquals(d.subject, nd.subject); + assertEquals(d.user, nd.user); + } - @Test - @Transactional - @Rollback(true) - public void testInsertCommentOnHost() { + @Test + @Transactional + @Rollback(true) + public void testInsertCommentOnHost() { - RenderHost host = RenderHost.newBuilder().setName("boo").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(15290520).setTotalSwap(2096).setNimbyEnabled(false) - .setNumProcs(2).setCoresPerProc(400).addTags("linux").setState(HardwareState.UP) - .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512) - .build(); + RenderHost host = RenderHost.newBuilder().setName("boo").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(15290520).setTotalSwap(2096) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(400).addTags("linux") + .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - DispatchHost h = hostManager.createHost(host); - commentDao.insertComment(h, d); + DispatchHost h = hostManager.createHost(host); + commentDao.insertComment(h, d); - assertNotNull(d.id); + assertNotNull(d.id); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals(d.message, nd.message); - assertEquals(d.subject, nd.subject); - assertEquals(d.user, nd.user); - } + assertEquals(d.message, nd.message); + assertEquals(d.subject, nd.subject); + assertEquals(d.user, nd.user); + } - @Test - @Transactional - @Rollback(true) - public void testUpdateComment() { + @Test + @Transactional + @Rollback(true) + public void testUpdateComment() { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - commentDao.insertComment(job, d); + commentDao.insertComment(job, d); - d.message = "no"; - d.subject = "no"; + d.message = "no"; + d.subject = "no"; - commentDao.updateComment(d); + commentDao.updateComment(d); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no", 
nd.message); - assertEquals("no", nd.subject); - } + assertEquals("no", nd.message); + assertEquals("no", nd.subject); + } - @Test - @Transactional - @Rollback(true) - public void testUpdateCommentMessage() { + @Test + @Transactional + @Rollback(true) + public void testUpdateCommentMessage() { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - commentDao.insertComment(job, d); - commentDao.updateCommentMessage(d.getId(), "no"); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no", nd.message); - } + commentDao.insertComment(job, d); + commentDao.updateCommentMessage(d.getId(), "no"); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + assertEquals("no", nd.message); + } - @Test - @Transactional - @Rollback(true) - public void testUpdateCommentSubject() { + @Test + @Transactional + @Rollback(true) + public void testUpdateCommentSubject() { - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; + CommentDetail d = new CommentDetail(); + d.message = "a message"; + d.subject = "a subject"; + d.user = "user"; - commentDao.insertComment(job, d); - commentDao.updateCommentSubject(d.getId(), "no"); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no", nd.subject); - } + commentDao.insertComment(job, d); + commentDao.updateCommentSubject(d.getId(), "no"); + CommentDetail nd = commentDao.getCommentDetail(d.getId()); + assertEquals("no", nd.subject); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java index 2ea1d215d..6d9b0f775 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java @@ -48,101 +48,102 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class DeedDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - OwnerManager ownerManager; + @Resource + OwnerManager ownerManager; - @Resource - DeedDao deedDao; + @Resource + DeedDao deedDao; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - public DispatchHost createHost() { + public DispatchHost createHost() { - RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("general") - .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(false).setNumProcs(2) + .setCoresPerProc(100).addTags("general").setState(HardwareState.UP) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - return dh; - } + return dh; + } - @Test - @Transactional - @Rollback(true) - public void testInsertDeed() { + @Test + @Transactional + @Rollback(true) + public void testInsertDeed() { - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); - assertEquals(host.getName(), d.host); - } + assertEquals(host.getName(), d.host); + } - @Test - @Transactional - @Rollback(true) - public void tesDeleteDeed() { + @Test + @Transactional + @Rollback(true) + public void tesDeleteDeed() { - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); - assertTrue(deedDao.deleteDeed(d)); + assertTrue(deedDao.deleteDeed(d)); - assertEquals(Integer.valueOf(0), jdbcTemplate - .queryForObject("SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM deed WHERE pk_deed=?", Integer.class, d.getId())); - assertFalse(deedDao.deleteDeed(d)); - } + assertFalse(deedDao.deleteDeed(d)); + } - @Test - @Transactional - @Rollback(true) - public void tesGetDeed() { + @Test + @Transactional + @Rollback(true) + public 
void tesGetDeed() { - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); - DeedEntity d2 = deedDao.getDeed(d.id); + DeedEntity d2 = deedDao.getDeed(d.id); - assertEquals(d, d2); - } + assertEquals(d, d2); + } - @Test - @Transactional - @Rollback(true) - public void tesGetDeeds() { + @Test + @Transactional + @Rollback(true) + public void tesGetDeeds() { - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); + DispatchHost host = createHost(); + ShowInterface s = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("squarepants", s); + DeedEntity d = deedDao.insertDeed(o, host); - assertEquals(1, deedDao.getDeeds(o).size()); - assertEquals(d, deedDao.getDeeds(o).get(0)); - } + assertEquals(1, deedDao.getDeeds(o).size()); + assertEquals(d, deedDao.getDeeds(o).get(0)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java index b11af87dd..20e1ed9ca 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DepartmentDaoTests.java @@ -39,68 +39,67 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class DepartmentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DepartmentDao departmentDao; - - @Resource - AdminManager adminManager; - - @Test - @Transactional - @Rollback(true) - public void testGetDepartment() { - String dept = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; - assertEquals(dept, departmentDao.getDepartment(dept).getId()); - assertEquals(dept, departmentDao.getDepartment(dept).getDepartmentId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDepartment() { - String dept = "Hair"; - assertEquals(dept, departmentDao.findDepartment(dept).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testgetDefaultDepartment() { - assertEquals( - jdbcTemplate.queryForObject("SELECT pk_dept FROM dept WHERE b_default=true", String.class), - departmentDao.getDefaultDepartment().getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testDepartmentExists() { - String dept = "Cloth"; - assertTrue(departmentDao.departmentExists(dept)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertDepartment() { - String deptName = "TestDept"; - departmentDao.insertDepartment(deptName); - DepartmentInterface d = departmentDao.findDepartment(deptName); - assertEquals(d.getName(), deptName); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteDepartment() { - String deptName = "TestDept"; - departmentDao.insertDepartment(deptName); - DepartmentInterface d = departmentDao.findDepartment(deptName); - assertEquals(d.getName(), deptName); - departmentDao.deleteDepartment(d); - } + @Autowired + @Rule + public 
AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DepartmentDao departmentDao; + + @Resource + AdminManager adminManager; + + @Test + @Transactional + @Rollback(true) + public void testGetDepartment() { + String dept = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; + assertEquals(dept, departmentDao.getDepartment(dept).getId()); + assertEquals(dept, departmentDao.getDepartment(dept).getDepartmentId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDepartment() { + String dept = "Hair"; + assertEquals(dept, departmentDao.findDepartment(dept).getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testgetDefaultDepartment() { + assertEquals(jdbcTemplate.queryForObject("SELECT pk_dept FROM dept WHERE b_default=true", + String.class), departmentDao.getDefaultDepartment().getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testDepartmentExists() { + String dept = "Cloth"; + assertTrue(departmentDao.departmentExists(dept)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertDepartment() { + String deptName = "TestDept"; + departmentDao.insertDepartment(deptName); + DepartmentInterface d = departmentDao.findDepartment(deptName); + assertEquals(d.getName(), deptName); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteDepartment() { + String deptName = "TestDept"; + departmentDao.insertDepartment(deptName); + DepartmentInterface d = departmentDao.findDepartment(deptName); + assertEquals(d.getName(), deptName); + departmentDao.deleteDepartment(d); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java index 698f162ce..5b8413138 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DependDaoTests.java @@ -63,387 +63,387 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class DependDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DependDao dependDao; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - DependManager dependManager; - - @Resource - JobManagerSupport jobManagerSupport; - - @Resource - JobLauncher jobLauncher; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); - } - - public JobDetail getJobA() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); - } - - public JobDetail getJobB() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnLayer() { - - JobDetail job_a = getJobA(); - 
JobDetail job_b = getJobB(); - - LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); - JobOnLayer depend = new JobOnLayer(job_a, layer); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - JobOnFrame depend = new JobOnFrame(job_a, frame); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); - - LayerOnJob depend = new LayerOnJob(layer, job_a); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - LayerOnFrame depend = new LayerOnFrame(layer, frame); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnJob depend = new FrameOnJob(frame, job_a); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void 
testInsertFrameOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnLayer depend = new FrameOnLayer(frame, layer); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameByFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_BY_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertPreviousFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - PreviousFrame depend = new PreviousFrame(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.PREVIOUS_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testReinsertFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - - dependDao.setInactive(lwd); - - // Try to reinsert it now that the original is inactive. 
- depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnJob() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependDao.insertDepend(depend); - - assertEquals(1, dependDao.getWhatDependsOn(job_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(job_a).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DependDao dependDao; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + JobManager jobManager; + + @Resource + DependManager dependManager; + + @Resource + JobManagerSupport jobManagerSupport; + + @Resource + JobLauncher jobLauncher; + + @Before + public void launchTestJobs() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); + } + + public JobDetail getJobA() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); + } + + public JobDetail getJobB() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + JobOnJob depend = new JobOnJob(job_a, job_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.JOB_ON_JOB, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); + JobOnLayer depend = new JobOnLayer(job_a, layer); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.JOB_ON_LAYER, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + JobOnFrame depend = new JobOnFrame(job_a, frame); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.JOB_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); + + LayerOnJob depend = new LayerOnJob(layer, job_a); + dependDao.insertDepend(depend); + + 
LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.LAYER_ON_JOB, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.LAYER_ON_LAYER, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + LayerOnFrame depend = new LayerOnFrame(layer, frame); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.LAYER_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFrameOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnJob depend = new FrameOnJob(frame, job_a); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_JOB, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFrameOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); + FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnLayer depend = new FrameOnLayer(frame, layer); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_LAYER, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFrameOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + + FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); + FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependDao.insertDepend(depend); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + assertEquals(depend.getId(), lwd.getId()); + assertEquals(DependType.FRAME_ON_FRAME, lwd.type); + assertEquals(DependTarget.EXTERNAL, lwd.target); + assertTrue(lwd.active); + assertFalse(lwd.anyFrame); + } + + @Test + @Transactional + @Rollback(true) + public void 
testInsertFrameByFrame() {
+
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
+ LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1");
+ LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1");
+
+ FrameByFrame depend = new FrameByFrame(layer_a, layer_b);
+ dependDao.insertDepend(depend);
+
+ LightweightDependency lwd = dependDao.getDepend(depend.getId());
+ assertEquals(depend.getId(), lwd.getId());
+ assertEquals(DependType.FRAME_BY_FRAME, lwd.type);
+ assertEquals(DependTarget.EXTERNAL, lwd.target);
+ assertTrue(lwd.active);
+ assertFalse(lwd.anyFrame);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testInsertPreviousFrame() {
+
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
+ LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1");
+ LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1");
+
+ PreviousFrame depend = new PreviousFrame(layer_a, layer_b);
+ dependDao.insertDepend(depend);
+
+ LightweightDependency lwd = dependDao.getDepend(depend.getId());
+ assertEquals(depend.getId(), lwd.getId());
+ assertEquals(DependType.PREVIOUS_FRAME, lwd.type);
+ assertEquals(DependTarget.EXTERNAL, lwd.target);
+ assertTrue(lwd.active);
+ assertFalse(lwd.anyFrame);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testReinsertFrameOnFrame() {
+
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
+
+ FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1");
+ FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1");
+
+ FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b);
+ dependDao.insertDepend(depend);
+
+ LightweightDependency lwd = dependDao.getDepend(depend.getId());
+ assertEquals(depend.getId(), lwd.getId());
+ assertEquals(DependType.FRAME_ON_FRAME, lwd.type);
+ assertEquals(DependTarget.EXTERNAL, lwd.target);
+ assertTrue(lwd.active);
+ assertFalse(lwd.anyFrame);
+
+ dependDao.setInactive(lwd);
+
+ // Try to reinsert it now that the original is inactive.
+ depend = new FrameOnFrame(frame_a, frame_b);
+ dependDao.insertDepend(depend);
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testGetWhatDependsOnJob() {
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
+
+ JobOnJob depend = new JobOnJob(job_a, job_b);
+ dependDao.insertDepend(depend);
+
+ assertEquals(1, dependDao.getWhatDependsOn(job_b).size());
+ assertEquals(0, dependDao.getWhatDependsOn(job_a).size());
+ }
+
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testGetWhatDependsOnLayer() {
+
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
+
+ LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1");
+ LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1");
+
+ LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b);
+ dependDao.insertDepend(depend);
- assertEquals(1, dependDao.getWhatDependsOn(layer_b).size());
- assertEquals(0, dependDao.getWhatDependsOn(layer_a).size());
- }
+ assertEquals(1, dependDao.getWhatDependsOn(layer_b).size());
+ assertEquals(0, dependDao.getWhatDependsOn(layer_a).size());
+ }
- @Test
- @Transactional
- @Rollback(true)
- public void testGetWhatDependsOnLayerInactive() {
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testGetWhatDependsOnLayerInactive() {
- JobDetail job_a = getJobA();
- JobDetail job_b = getJobB();
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
- LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1");
- LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1");
+ LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1");
+ LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1");
- LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b);
- dependDao.insertDepend(depend);
+ LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b);
+ dependDao.insertDepend(depend);
- dependDao.setInactive(dependDao.getDepend(depend.getId()));
+ dependDao.setInactive(dependDao.getDepend(depend.getId()));
- assertEquals(1, dependDao.getWhatDependsOn(layer_b, false).size());
- assertEquals(0, dependDao.getWhatDependsOn(layer_b, true).size());
- }
+ assertEquals(1, dependDao.getWhatDependsOn(layer_b, false).size());
+ assertEquals(0, dependDao.getWhatDependsOn(layer_b, true).size());
+ }
- @Test
- @Transactional
- @Rollback(true)
- public void testGetWhatDependsOnFrame() {
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testGetWhatDependsOnFrame() {
- JobDetail job_a = getJobA();
- JobDetail job_b = getJobB();
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
- FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1");
- FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1");
+ FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1");
+ FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1");
- FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b);
- dependDao.insertDepend(depend);
+ FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b);
+ dependDao.insertDepend(depend);
- assertEquals(1, dependDao.getWhatDependsOn(frame_b).size());
- assertEquals(0, dependDao.getWhatDependsOn(frame_a).size());
- }
+ assertEquals(1, dependDao.getWhatDependsOn(frame_b).size());
+ assertEquals(0, dependDao.getWhatDependsOn(frame_a).size());
+ }
- @Test
- @Transactional
- @Rollback(true)
- public void testGetWhatDependsOnFrameInactive() {
+ @Test
+ @Transactional
+ @Rollback(true)
+ public void testGetWhatDependsOnFrameInactive() {
- JobDetail job_a = getJobA();
- JobDetail job_b = getJobB();
+ JobDetail job_a = getJobA();
+ JobDetail job_b = getJobB();
- FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1");
- FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1");
+ FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1");
+ FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1");
- FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b);
- dependDao.insertDepend(depend);
+ FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b);
+ dependDao.insertDepend(depend);
- dependDao.setInactive(dependDao.getDepend(depend.getId()));
+ dependDao.setInactive(dependDao.getDepend(depend.getId()));
- assertEquals(1, dependDao.getWhatDependsOn(frame_b, false).size());
- assertEquals(0, dependDao.getWhatDependsOn(frame_b, true).size());
- assertEquals(0, dependDao.getWhatDependsOn(frame_a, true).size());
- }
+ assertEquals(1, dependDao.getWhatDependsOn(frame_b, false).size());
+ assertEquals(0, dependDao.getWhatDependsOn(frame_b, true).size());
+ assertEquals(0, dependDao.getWhatDependsOn(frame_a, true).size());
+ }
 }
diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java
index c77f04915..d523f0c6b 100644
--- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java
+++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java
@@ -65,158 +65,162 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class)
 public class DispatcherDaoFifoTests extends AbstractTransactionalJUnit4SpringContextTests {
- @Autowired
- @Rule
- public AssumingPostgresEngine assumingPostgresEngine;
-
- @Resource
- DispatcherDao dispatcherDao;
-
- @Resource
- HostDao hostDao;
-
- @Resource
- JobManager jobManager;
-
- @Resource
- HostManager hostManager;
-
- @Resource
- AdminManager adminManager;
-
- @Resource
- GroupManager groupManager;
-
- @Resource
- Dispatcher dispatcher;
-
- @Resource
- JobLauncher jobLauncher;
-
- private static final String HOSTNAME = "beta";
-
- public DispatchHost getHost() {
- return hostDao.findDispatchHost(HOSTNAME);
- }
-
- private void launchJobs(int count) throws Exception {
- Document docTemplate =
- new SAXBuilder(true).build(new File("src/test/resources/conf/jobspec/jobspec_simple.xml"));
- docTemplate.getDocType().setSystemID("http://localhost:8080/spcue/dtd/cjsl-1.12.dtd");
- Element root = docTemplate.getRootElement();
- Element jobTemplate = root.getChild("job");
- Element depends = root.getChild("depends");
- assertEquals(jobTemplate.getAttributeValue("name"), "test");
- root.removeContent(jobTemplate);
- root.removeContent(depends);
-
- long t = System.currentTimeMillis();
- for (int i = 0; i < count; i++) {
- Document doc = (Document) docTemplate.clone();
- root = doc.getRootElement();
- Element job = (Element) jobTemplate.clone();
- job.setAttribute("name", "job" + i);
- root.addContent(job);
- root.addContent((Element) depends.clone());
- jobLauncher.launch(new XMLOutputter().outputString(doc));
-
- // Force to set incremental ts_started to the jobs
- // because current_timestamp is not updated during test.
- jdbcTemplate.update("UPDATE job SET ts_started = ?
WHERE str_name = ?", new Timestamp(t + i), - "pipe-default-testuser_job" + i); + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DispatcherDao dispatcherDao; + + @Resource + HostDao hostDao; + + @Resource + JobManager jobManager; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + JobLauncher jobLauncher; + + private static final String HOSTNAME = "beta"; + + public DispatchHost getHost() { + return hostDao.findDispatchHost(HOSTNAME); + } + + private void launchJobs(int count) throws Exception { + Document docTemplate = new SAXBuilder(true) + .build(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + docTemplate.getDocType().setSystemID("http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); + Element root = docTemplate.getRootElement(); + Element jobTemplate = root.getChild("job"); + Element depends = root.getChild("depends"); + assertEquals(jobTemplate.getAttributeValue("name"), "test"); + root.removeContent(jobTemplate); + root.removeContent(depends); + + long t = System.currentTimeMillis(); + for (int i = 0; i < count; i++) { + Document doc = (Document) docTemplate.clone(); + root = doc.getRootElement(); + Element job = (Element) jobTemplate.clone(); + job.setAttribute("name", "job" + i); + root.addContent(job); + root.addContent((Element) depends.clone()); + jobLauncher.launch(new XMLOutputter().outputString(doc)); + + // Force to set incremental ts_started to the jobs + // because current_timestamp is not updated during test. + jdbcTemplate.update("UPDATE job SET ts_started = ? WHERE str_name = ?", + new Timestamp(t + i), "pipe-default-testuser_job" + i); + } } - } - - @Before - public void launchJob() { - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); - - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - } - - @After - public void resetFifoScheduling() { - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(2).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux").build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testFifoSchedulingEnabled() { - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); - } - - @Test - @Transactional - @Rollback(true) - public void testAllSorted() throws Exception { - int count = 10; - launchJobs(count); - - Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); - assertEquals(count, jobs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testPortionSorted() throws Exception { - int count = 100; - launchJobs(count); - - int portion = 19; - Set jobs = dispatcherDao.findDispatchJobs(getHost(), (portion + 1) / 10); - assertEquals(portion, jobs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFifoSchedulingDisabled() throws Exception { - dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); - - int count = 10; - launchJobs(count); - - Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); - assertEquals(count, jobs.size()); - - List sortedJobs = new ArrayList(jobs); - Collections.sort(sortedJobs, Comparator.comparing(jobId -> jobManager.getJob(jobId).getName())); - - for (int i = 0; i < count; i++) { - assertEquals("pipe-default-testuser_job" + i, jobManager.getJob(sortedJobs.get(i)).getName()); + + @Before + public void launchJob() { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); + + dispatcher.setTestMode(true); + jobLauncher.testMode = true; } - } - @Test - @Transactional - @Rollback(true) - public void testGroup() throws Exception { - int count = 10; - launchJobs(count); + @After + public void resetFifoScheduling() { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + } - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_job0"); - assertNotNull(job); + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } - Set jobs = dispatcherDao.findDispatchJobs(getHost(), groupManager.getGroupDetail(job)); - assertEquals(count, jobs.size()); - } + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingEnabled() { + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); + } + + @Test + @Transactional + @Rollback(true) + public void testAllSorted() throws Exception { + int count = 10; + launchJobs(count); + + Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); + assertEquals(count, jobs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testPortionSorted() throws Exception { + int count = 100; + launchJobs(count); + + int portion = 19; + Set jobs = dispatcherDao.findDispatchJobs(getHost(), (portion + 1) / 10); + assertEquals(portion, jobs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingDisabled() throws Exception { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + + int count = 10; + launchJobs(count); + + Set jobs = dispatcherDao.findDispatchJobs(getHost(), count); + assertEquals(count, jobs.size()); + + List sortedJobs = new ArrayList(jobs); + Collections.sort(sortedJobs, + Comparator.comparing(jobId -> jobManager.getJob(jobId).getName())); + + for (int i = 0; i < count; i++) { + assertEquals("pipe-default-testuser_job" + i, + jobManager.getJob(sortedJobs.get(i)).getName()); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGroup() throws Exception { + int count = 10; + launchJobs(count); + + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_job0"); + assertNotNull(job); + + Set jobs = + dispatcherDao.findDispatchJobs(getHost(), groupManager.getGroupDetail(job)); + assertEquals(count, jobs.size()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java index ef3719d9b..f1d3d7a7d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java @@ -67,456 +67,459 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class DispatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - DispatcherDao dispatcherDao; + @Resource + DispatcherDao dispatcherDao; - @Resource - HostDao hostDao; + @Resource + HostDao hostDao; - @Resource - ProcDao procDao; + @Resource + ProcDao procDao; - @Resource - LayerDao layerDao; + @Resource + 
LayerDao layerDao; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - AllocationDao allocationDao; + @Resource + AllocationDao allocationDao; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - DispatchSupport dispatchSupport; + @Resource + DispatchSupport dispatchSupport; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - BookingDao bookingDao; + @Resource + BookingDao bookingDao; - private static final String HOSTNAME = "beta"; + private static final String HOSTNAME = "beta"; - public DispatchHost getHost() { - return hostDao.findDispatchHost(HOSTNAME); - } + public DispatchHost getHost() { + return hostDao.findDispatchHost(HOSTNAME); + } + + public JobDetail getJob1() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + } + + public JobDetail getJob2() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v2"); + } - public JobDetail getJob1() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - } + @Before + public void launchJob() { + dispatcher.setTestMode(true); + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } - public JobDetail getJob2() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v2"); - } + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } - @Before - public void launchJob() { - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFrameByHost() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(2).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux").build(); + for (LayerDetail layer : layerDao.getLayerDetails(job)) { + assertTrue(layer.tags.contains("general")); + } - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } + assertTrue(jdbcTemplate + .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id) + .contains("general")); - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFrameByHost() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + assertNotNull(frame); + assertEquals(frame.name, "0001-pass_1"); + } - for (LayerDetail layer : layerDao.getLayerDetails(job)) { - assertTrue(layer.tags.contains("general")); + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFrameByProc() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + + // TODO: fix the fact you can book the same proc on multiple frames + // probably just need to make sure you can't update a proc's frame + // assignment unless the frame id is null. + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + assertNotNull(frame); + assertEquals("0001-pass_1", frame.name); + + VirtualProc proc = VirtualProc.build(host, frame, job.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + frame = dispatcherDao.findNextDispatchFrame(job, proc); + assertNotNull(frame); + assertEquals("0001-pass_2", frame.name); + dispatcher.dispatch(frame, proc); + + frame = dispatcherDao.findNextDispatchFrame(job, proc); + assertNotNull(frame); + assertEquals("0002-pass_1", frame.name); + dispatcher.dispatch(frame, proc); } - assertTrue(jdbcTemplate - .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id) - .contains("general")); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - assertNotNull(frame); - assertEquals(frame.name, "0001-pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFrameByProc() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - // TODO: fix the fact you can book the same proc on multiple frames - // probably just need to make sure you can't update a proc's frame - // assignment unless the frame id is null. - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - assertNotNull(frame); - assertEquals("0001-pass_1", frame.name); - - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals("0001-pass_2", frame.name); - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals("0002-pass_1", frame.name); - dispatcher.dispatch(frame, proc); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProc() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - // TODO: fix the fact you can book the same proc on multiple frames - // probably just need to make sure you can't update a proc's frame - // assignment unless the frame id is null. 
- - List frames = dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals(frame.name, "0001-pass_2"); - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals(frame.name, "0002-pass_1"); - dispatcher.dispatch(frame, proc); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByHostAndJobLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - host.isLocalDispatch = true; - List frames = dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByHostAndLayerLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - LayerInterface layer = jobManager.getLayers(job).get(0); - host.isLocalDispatch = true; - - List frames = dispatcherDao.findNextDispatchFrames(layer, host, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProcAndJobLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - host.isLocalDispatch = true; - List frames = dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - proc.isLocalDispatch = true; - - frames = dispatcherDao.findNextDispatchFrames(job, proc, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProcAndLayerLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - LayerInterface layer = jobManager.getLayers(job).get(0); - host.isLocalDispatch = true; - - List frames = dispatcherDao.findNextDispatchFrames(layer, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - VirtualProc proc = VirtualProc.build(host, frame, job.os); - proc.coresReserved = 100; - proc.isLocalDispatch = true; - - frames = dispatcherDao.findNextDispatchFrames(layer, proc, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobs() { - DispatchHost host = getHost(); - - assertTrue(jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job WHERE str_state='PENDING'", - Integer.class) > 0); - - Set jobs = dispatcherDao.findDispatchJobs(host, 10); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByGroup() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - - assertNotNull(job); - assertNotNull(job.groupId); - - Set jobs = dispatcherDao.findDispatchJobs(host, groupManager.getGroupDetail(job)); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByShow() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - assertNotNull(job); - - Set jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByLocal() { 
- DispatchHost host = getHost(); - final JobDetail job = getJob1(); - assertNotNull(job); - - Set jobs = dispatcherDao.findLocalDispatchJobs(host); - assertEquals(0, jobs.size()); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setThreads(1); - lja.setMaxMemory(CueUtil.GB16); - lja.setMaxCoreUnits(200); - lja.setMaxGpuMemory(1); - bookingDao.insertLocalHostAssignment(host, job, lja); - - jobs = dispatcherDao.findLocalDispatchJobs(host); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testfindUnderProcedJob() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 1000); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job1.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean under = dispatcherDao.findUnderProcedJob(job1, proc); - assertTrue(under); - } - - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsTrue() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 100; - job2.priority = 200; - - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 0); - jobDao.updatePriority(job1, 100); - jobDao.updatePriority(job2, 200); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job2.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertTrue(isHigher); - } - - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsFalse() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 20000; - job2.priority = 100; - - jobDao.updatePriority(job1, 20000); - jobDao.updatePriority(job2, 100); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job2.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertFalse(isHigher); - } - - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsMaxProcBound() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 100; - job2.priority = 200; - - 
jobDao.updateMaxCores(job2, 0); - jobDao.updatePriority(job1, 100); - jobDao.updatePriority(job2, 200); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame, job2.os); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertFalse(isHigher); - } - - @Test - @Transactional - @Rollback(true) - public void testFifoSchedulingEnabled() { - assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByShowMultiOs() { - DispatchHost host = getHost(); - // Set multiple Os and confirm jobs with Linux are still being found - final JobDetail job = getJob1(); - assertNotNull(job); - - // Host with different os - host.setOs("centos7,SomethingElse"); - Set jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() == 0); - - // Host with Linux Os (same as defined on spec) - host.setOs("centos7,Linux,rocky9"); - jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsAllShowsMultiOs() { - DispatchHost host = getHost(); - // Set multiple Os and confirm jobs with Linux are still being found - final JobDetail job = getJob1(); - assertNotNull(job); - - // Host with incompatible OS shouldn't find any job - host.setOs("centos7,SomethingElse"); - Set jobs = dispatcherDao.findDispatchJobs(host, 5); - assertTrue(jobs.size() == 0); - - // Host with Linux Os (same as defined on spec) should find jobs - host.setOs("centos7,Linux,rocky9"); - jobs = dispatcherDao.findDispatchJobs(host, 5); - assertTrue(jobs.size() > 0); - } + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFramesByProc() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + + // TODO: fix the fact you can book the same proc on multiple frames + // probably just need to make sure you can't update a proc's frame + // assignment unless the frame id is null. 
+ + List frames = dispatcherDao.findNextDispatchFrames(job, host, 10); + assertEquals(10, frames.size()); + + DispatchFrame frame = frames.get(0); + + VirtualProc proc = VirtualProc.build(host, frame, job.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + frame = dispatcherDao.findNextDispatchFrame(job, proc); + assertNotNull(frame); + assertEquals(frame.name, "0001-pass_2"); + dispatcher.dispatch(frame, proc); + + frame = dispatcherDao.findNextDispatchFrame(job, proc); + assertNotNull(frame); + assertEquals(frame.name, "0002-pass_1"); + dispatcher.dispatch(frame, proc); + } + + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFramesByHostAndJobLocal() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + host.isLocalDispatch = true; + List frames = dispatcherDao.findNextDispatchFrames(job, host, 10); + assertEquals(10, frames.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFramesByHostAndLayerLocal() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + LayerInterface layer = jobManager.getLayers(job).get(0); + host.isLocalDispatch = true; + + List frames = dispatcherDao.findNextDispatchFrames(layer, host, 10); + assertEquals(10, frames.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFramesByProcAndJobLocal() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + host.isLocalDispatch = true; + List frames = dispatcherDao.findNextDispatchFrames(job, host, 10); + assertEquals(10, frames.size()); + + DispatchFrame frame = frames.get(0); + VirtualProc proc = VirtualProc.build(host, frame, job.os); + proc.coresReserved = 100; + proc.isLocalDispatch = true; + + frames = dispatcherDao.findNextDispatchFrames(job, proc, 10); + assertEquals(10, frames.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindNextDispatchFramesByProcAndLayerLocal() { + DispatchHost host = getHost(); + JobDetail job = getJob1(); + LayerInterface layer = jobManager.getLayers(job).get(0); + host.isLocalDispatch = true; + + List frames = dispatcherDao.findNextDispatchFrames(layer, host, 10); + assertEquals(10, frames.size()); + + DispatchFrame frame = frames.get(0); + VirtualProc proc = VirtualProc.build(host, frame, job.os); + proc.coresReserved = 100; + proc.isLocalDispatch = true; + + frames = dispatcherDao.findNextDispatchFrames(layer, proc, 10); + assertEquals(10, frames.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobs() { + DispatchHost host = getHost(); + + assertTrue(jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job WHERE str_state='PENDING'", + Integer.class) > 0); + + Set jobs = dispatcherDao.findDispatchJobs(host, 10); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsByGroup() { + DispatchHost host = getHost(); + final JobDetail job = getJob1(); + + assertNotNull(job); + assertNotNull(job.groupId); + + Set jobs = dispatcherDao.findDispatchJobs(host, groupManager.getGroupDetail(job)); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsByShow() { + DispatchHost host = getHost(); + final JobDetail job = getJob1(); + assertNotNull(job); + + Set jobs = + dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsByLocal() 
{ + DispatchHost host = getHost(); + final JobDetail job = getJob1(); + assertNotNull(job); + + Set jobs = dispatcherDao.findLocalDispatchJobs(host); + assertEquals(0, jobs.size()); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setThreads(1); + lja.setMaxMemory(CueUtil.GB16); + lja.setMaxCoreUnits(200); + lja.setMaxGpuMemory(1); + bookingDao.insertLocalHostAssignment(host, job, lja); + + jobs = dispatcherDao.findLocalDispatchJobs(host); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testfindUnderProcedJob() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + + jobDao.updateMinCores(job1, 0); + jobDao.updateMinCores(job2, 1000); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job1.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean under = dispatcherDao.findUnderProcedJob(job1, proc); + assertTrue(under); + } + + @Test + @Transactional + @Rollback(true) + public void testHigherPriorityJobExistsTrue() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + job1.priority = 100; + job2.priority = 200; + + jobDao.updateMinCores(job1, 0); + jobDao.updateMinCores(job2, 0); + jobDao.updatePriority(job1, 100); + jobDao.updatePriority(job2, 200); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job2.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); + assertTrue(isHigher); + } + + @Test + @Transactional + @Rollback(true) + public void testHigherPriorityJobExistsFalse() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + job1.priority = 20000; + job2.priority = 100; + + jobDao.updatePriority(job1, 20000); + jobDao.updatePriority(job2, 100); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job2.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); + assertFalse(isHigher); + } + + @Test + @Transactional + @Rollback(true) + public void testHigherPriorityJobExistsMaxProcBound() { + DispatchHost host = getHost(); + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + job1.priority = 100; + job2.priority = 200; + + 
jobDao.updateMaxCores(job2, 0); + jobDao.updatePriority(job1, 100); + jobDao.updatePriority(job2, 200); + + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); + assertNotNull(frame); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job1.id)); + + assertEquals(JobState.PENDING.toString(), jdbcTemplate + .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job2.id)); + + VirtualProc proc = VirtualProc.build(host, frame, job2.os); + proc.coresReserved = 100; + dispatcher.dispatch(frame, proc); + + boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); + assertFalse(isHigher); + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingEnabled() { + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsByShowMultiOs() { + DispatchHost host = getHost(); + // Set multiple Os and confirm jobs with Linux are still being found + final JobDetail job = getJob1(); + assertNotNull(job); + + // Host with different os + host.setOs("centos7,SomethingElse"); + Set jobs = + dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); + assertTrue(jobs.size() == 0); + + // Host with Linux Os (same as defined on spec) + host.setOs("centos7,Linux,rocky9"); + jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); + assertTrue(jobs.size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testFindDispatchJobsAllShowsMultiOs() { + DispatchHost host = getHost(); + // Set multiple Os and confirm jobs with Linux are still being found + final JobDetail job = getJob1(); + assertNotNull(job); + + // Host with incompatible OS shouldn't find any job + host.setOs("centos7,SomethingElse"); + Set jobs = dispatcherDao.findDispatchJobs(host, 5); + assertTrue(jobs.size() == 0); + + // Host with Linux Os (same as defined on spec) should find jobs + host.setOs("centos7,Linux,rocky9"); + jobs = dispatcherDao.findDispatchJobs(host, 5); + assertTrue(jobs.size() > 0); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java index 98f1d994f..4b06b0423 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FacilityDaoTests.java @@ -38,36 +38,37 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FacilityDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - FacilityDao facilityDao; + @Resource + FacilityDao facilityDao; - @Test - @Transactional - @Rollback(true) - public void testGetDetaultFacility() { - assertEquals(jdbcTemplate - .queryForObject("SELECT pk_facility FROM facility WHERE b_default=true", String.class), - facilityDao.getDefaultFacility().getId()); - } + @Test + @Transactional + @Rollback(true) + public void testGetDetaultFacility() { + assertEquals( + jdbcTemplate.queryForObject("SELECT pk_facility FROM facility WHERE b_default=true", + String.class), + facilityDao.getDefaultFacility().getId()); + } - @Test - 
@Transactional - @Rollback(true) - public void testGetFacility() { - String id = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; - assertEquals(id, facilityDao.getFacility(id).getId()); - assertEquals(id, facilityDao.getFacility("spi").getId()); - } + @Test + @Transactional + @Rollback(true) + public void testGetFacility() { + String id = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; + assertEquals(id, facilityDao.getFacility(id).getId()); + assertEquals(id, facilityDao.getFacility("spi").getId()); + } - @Test - @Transactional - @Rollback(true) - public void testFacilityExists() { - assertTrue(facilityDao.facilityExists("spi")); - assertFalse(facilityDao.facilityExists("rambo")); - } + @Test + @Transactional + @Rollback(true) + public void testFacilityExists() { + assertTrue(facilityDao.facilityExists("spi")); + assertFalse(facilityDao.facilityExists("rambo")); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java index 3c80c037a..c806a8013 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FilterDaoTests.java @@ -44,232 +44,232 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FilterDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - AdminManager adminManager; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface createShow() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - return show; - } - - public ShowInterface getShow() { - return showDao.findShowDetail("testtest"); - } - - public FilterEntity buildFilter(ShowInterface show) { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = show.getId(); - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testGetActiveFilters() { - filterDao.getActiveFilters(createShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilters() { - filterDao.getFilters(createShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterEnabled() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - filterDao.updateSetFilterEnabled(f, false); - assertFalse(jdbcTemplate.queryForObject("SELECT b_enabled FROM filter WHERE pk_filter=?", - Boolean.class, f.getFilterId())); - filterDao.updateSetFilterEnabled(f, true); - assertTrue(jdbcTemplate.queryForObject("SELECT b_enabled FROM filter WHERE pk_filter=?", - Boolean.class, f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterName() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - assertEquals(FILTER_NAME, jdbcTemplate.queryForObject( - "SELECT str_name FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); - filterDao.updateSetFilterName(f, "TEST"); - assertEquals("TEST", jdbcTemplate.queryForObject( - "SELECT str_name FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterType() { - FilterEntity f = 
buildFilter(createShow()); - filterDao.insertFilter(f); - assertEquals(FilterType.MATCH_ANY.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); - filterDao.updateSetFilterType(f, FilterType.MATCH_ALL); - assertEquals(FilterType.MATCH_ALL.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterOrder() { - - ShowInterface show = createShow(); - int currentFilters = jdbcTemplate.queryForObject("SELECT COUNT(*) FROM filter WHERE pk_show=?", - Integer.class, show.getShowId()); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - assertEquals(Integer.valueOf(currentFilters + 1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(currentFilters + 2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); - - filterDao.updateSetFilterOrder(f2, 1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - filterDao.deleteFilter(f); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - } - - @Test - @Transactional - @Rollback(true) - public void testReorderFilters() { - buildFilter(createShow()); - filterDao.reorderFilters(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testLowerFilterOrder() { - - ShowInterface show = createShow(); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - /** - * These could fail if the test DB has other filters. - */ - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); - - filterDao.lowerFilterOrder(f2, 1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testRaiseFilterOrder() { - - ShowInterface show = createShow(); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - /** - * These could fail if the test DB has other filters. 
- */ - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); - - filterDao.raiseFilterOrder(f1, 1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - - filterDao.getFilter(f); - filterDao.getFilter(f.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - - filterDao.findFilter(getShow(), FILTER_NAME); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + FilterDao filterDao; + + @Resource + ShowDao showDao; + + @Resource + AdminManager adminManager; + + private static String FILTER_NAME = "test_filter"; + + public ShowInterface createShow() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + return show; + } + + public ShowInterface getShow() { + return showDao.findShowDetail("testtest"); + } + + public FilterEntity buildFilter(ShowInterface show) { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = show.getId(); + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testGetActiveFilters() { + filterDao.getActiveFilters(createShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilters() { + filterDao.getFilters(createShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterEnabled() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + filterDao.updateSetFilterEnabled(f, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_enabled FROM filter WHERE pk_filter=?", + Boolean.class, f.getFilterId())); + filterDao.updateSetFilterEnabled(f, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_enabled FROM filter WHERE pk_filter=?", + Boolean.class, f.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterName() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + assertEquals(FILTER_NAME, jdbcTemplate.queryForObject( + "SELECT str_name FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); + filterDao.updateSetFilterName(f, "TEST"); + assertEquals("TEST", jdbcTemplate.queryForObject( + "SELECT str_name FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterType() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + assertEquals(FilterType.MATCH_ANY.toString(), jdbcTemplate.queryForObject( + "SELECT str_type FROM filter WHERE pk_filter=?", String.class, f.getFilterId())); + filterDao.updateSetFilterType(f, FilterType.MATCH_ALL); + assertEquals(FilterType.MATCH_ALL.toString(), jdbcTemplate.queryForObject( + "SELECT str_type FROM filter 
WHERE pk_filter=?", String.class, f.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSetFilterOrder() { + + ShowInterface show = createShow(); + int currentFilters = jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM filter WHERE pk_show=?", Integer.class, show.getShowId()); + + FilterEntity f1 = buildFilter(show); + filterDao.insertFilter(f1); + + FilterEntity f2 = buildFilter(show); + f2.name = "TEST"; + filterDao.insertFilter(f2); + + assertEquals(Integer.valueOf(currentFilters + 1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(currentFilters + 2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + + filterDao.updateSetFilterOrder(f2, 1); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + filterDao.deleteFilter(f); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + } + + @Test + @Transactional + @Rollback(true) + public void testReorderFilters() { + buildFilter(createShow()); + filterDao.reorderFilters(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testLowerFilterOrder() { + + ShowInterface show = createShow(); + + FilterEntity f1 = buildFilter(show); + filterDao.insertFilter(f1); + + FilterEntity f2 = buildFilter(show); + f2.name = "TEST"; + filterDao.insertFilter(f2); + + /** + * These could fail if the test DB has other filters. + */ + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + + filterDao.lowerFilterOrder(f2, 1); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testRaiseFilterOrder() { + + ShowInterface show = createShow(); + + FilterEntity f1 = buildFilter(show); + filterDao.insertFilter(f1); + + FilterEntity f2 = buildFilter(show); + f2.name = "TEST"; + filterDao.insertFilter(f2); + + /** + * These could fail if the test DB has other filters. 
+ */ + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + + filterDao.raiseFilterOrder(f1, 1); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f1.getFilterId())); + + assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( + "SELECT f_order FROM filter WHERE pk_filter=?", Integer.class, f2.getFilterId())); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + + filterDao.getFilter(f); + filterDao.getFilter(f.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFilter() { + FilterEntity f = buildFilter(createShow()); + filterDao.insertFilter(f); + + filterDao.findFilter(getShow(), FILTER_NAME); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java index d05d4d68b..7c88af285 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java @@ -68,575 +68,581 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FrameDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - FrameDao frameDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostDao hostDao; - - @Resource - ProcDao procDao; - - @Resource - HostManager hostManager; - - @Resource - DependManager dependManager; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameSearchFactory frameSearchFactory; - - private static final String HOST = "beta"; - - public DispatchHost createHost() { - return hostDao.findDispatchHost(HOST); - } - - @BeforeTransaction - public void create() { - - RenderHost host = RenderHost.newBuilder().setName(HOST).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(1).setCoresPerProc(100).addAllTags(ImmutableList.of("mcore", "4core", "8g")) - .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - - hostManager.createHost(host); - } - - @AfterTransaction - public void destroy() { - jdbcTemplate.update("DELETE FROM host WHERE str_name=?", HOST); - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameDetail() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameDetail frame = frameDao.getFrameDetail(f); - frame = frameDao.getFrameDetail(f.getFrameId()); - assertEquals("0001-pass_1", frame.name); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrameDetail() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals("0001-pass_1", frame.name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrame() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface frame = frameDao.getFrame(f.getFrameId()); - assertEquals("0001-pass_1", frame.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameByLayer() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface f2 = frameDao.findFrame((LayerInterface) f, 1); - - assertEquals(f.getFrameId(), f2.getFrameId()); - assertEquals(f.getLayerId(), f2.getLayerId()); - assertEquals(f.getJobId(), f2.getJobId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(f.getName(), "0001-pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrames() { - JobDetail job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().addFrames("0001-pass_1").build()); - assertEquals(1, frameDao.findFrames(r).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrameDetails() { - JobDetail job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().addFrames("0001-pass_1").build()); - assertEquals(1, frameDao.findFrameDetails(r).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testgetOrphanedFrames() { - assertEquals(0, frameDao.getOrphanedFrames().size()); - - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - - /* - * Update the first frame to the orphan state, which is a frame that is in the running state, - * has no corresponding proc entry and has not been updated in the last 5 min. 
- */ - jdbcTemplate.update( - "UPDATE frame SET str_state = 'RUNNING', " - + "ts_updated = current_timestamp - interval '301' second WHERE pk_frame = ?", - f.getFrameId()); - - assertEquals(1, frameDao.getOrphanedFrames().size()); - assertTrue(frameDao.isOrphan(f)); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameState() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - assertTrue(frameDao.updateFrameState(f, FrameState.RUNNING)); - - assertEquals(FrameState.RUNNING.toString(), jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", String.class, f.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testFailUpdateFrameState() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - - /** Change the version so the update fails **/ - jdbcTemplate.update("UPDATE frame SET int_version = int_version + 1 WHERE pk_frame=?", - - f.getFrameId()); - - assertEquals(false, frameDao.updateFrameState(f, FrameState.RUNNING)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameStarted() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - assertEquals(FrameState.WAITING, frame.state); - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - frameDao.updateFrameStarted(proc, fd); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameStopped() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - - assertEquals("0001-pass_1_preprocess", frame.getName()); - assertEquals(FrameState.WAITING, frame.state); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - frameDao.updateFrameStarted(proc, fd); - - try { - Thread.sleep(1001); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - DispatchFrame fd2 = frameDao.getDispatchFrame(frame.getId()); - assertTrue(frameDao.updateFrameStopped(fd2, FrameState.DEAD, 1, 1000l)); - - assertEquals(FrameState.DEAD.toString(), jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameFixed() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - - assertEquals("0001-pass_1_preprocess", frame.getName()); - assertEquals(FrameState.WAITING, frame.state); - - VirtualProc proc = new 
VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - frameDao.updateFrameStarted(proc, fd); - - try { - Thread.sleep(1001); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - frameDao.updateFrameState(frame, FrameState.WAITING); - frameDao.updateFrameFixed(proc, frame); - - assertEquals(FrameState.RUNNING.toString(), jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchFrame() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - assertEquals(dframe.id, frame.id); - } - - @Test - @Transactional - @Rollback(true) - public void testMarkFrameAsWaiting() { - JobDetail job = launchJob(); - - FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); - - frameDao.markFrameAsWaiting(f); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testMarkFrameAsDepend() { - JobDetail job = launchJob(); - - FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); - - assertTrue(jdbcTemplate.queryForObject("SELECT b_active FROM depend WHERE pk_layer_depend_er=?", - Boolean.class, f.getLayerId())); - - frameDao.markFrameAsWaiting(f); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); - - /* - * Need to grab new version of frame object once the state has changed. 
- */ - f = frameDao.findFrameDetail(job, "0001-pass_1"); - - frameDao.markFrameAsDepend(f); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", Integer.class, f.getFrameId())); - } - - @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindLongestFrame() { - JobDetail job = launchJob(); - frameDao.findLongestFrame(job); - } - - @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindShortestFrame() { - JobDetail job = launchJob(); - frameDao.findShortestFrame(job); - } - - @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void findHighestMemoryFrame() { - JobDetail job = launchJob(); - frameDao.findHighestMemoryFrame(job); - } - - @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void findLowestMemoryFrame() { - JobDetail job = launchJob(); - frameDao.findLowestMemoryFrame(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDependentFrames() { - JobDetail job = launchJob(); - FrameInterface frame_a = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface frame_b = frameDao.findFrame(job, "0002-pass_1"); - - dependManager.createDepend(new FrameOnFrame(frame_a, frame_b)); - - assertEquals(1, - frameDao.getDependentFrames(dependManager.getWhatDependsOn(frame_b).get(0)).size(), 1); - } - - @Test - @Transactional - @Rollback(true) - public void testGetResourceUsage() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - frameDao.getResourceUsage(dframe); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameCleared() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - /* - * Only frames without active procs can be cleared. 
- */ - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - assertFalse(frameDao.updateFrameCleared(dframe)); - - dispatchSupport.unbookProc(proc); - assertTrue(frameDao.updateFrameCleared(dframe)); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetStaleCheckpoints() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - assertEquals(0, frameDao.getStaleCheckpoints(300).size()); - jdbcTemplate.update( - "UPDATE frame SET str_state = ?, " - + "ts_stopped = current_timestamp - interval '400' second WHERE pk_frame = ?", - FrameState.CHECKPOINT.toString(), frame.getFrameId()); - assertEquals(1, frameDao.getStaleCheckpoints(300).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testSetCheckpointState() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - frameDao.updateFrameCheckpointState(frame, CheckpointState.ENABLED); - - String state = - jdbcTemplate.queryForObject("SELECT str_checkpoint_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId()); - - assertEquals(CheckpointState.ENABLED.toString(), state); - - /** - * To set a checkpoint complete the frame state must be in the checkpoint state. - */ - frameDao.updateFrameState(frame, FrameState.CHECKPOINT); - jdbcTemplate.update( - "UPDATE frame SET ts_started=current_timestamp, ts_stopped=current_timestamp + INTERVAL '20' second WHERE pk_frame=?", - frame.getFrameId()); - - assertTrue(frameDao.updateFrameCheckpointState(frame, CheckpointState.COMPLETE)); - Map result = jdbcTemplate - .queryForMap("SELECT int_checkpoint_count FROM frame WHERE pk_frame=?", frame.getFrameId()); - - Integer checkPointCount = (Integer) result.get("int_checkpoint_count"); - assertEquals(1, checkPointCount.intValue()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsFrameComplete() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - frameDao.updateFrameState(frame, FrameState.EATEN); - assertTrue(frameDao.isFrameComplete(frame)); - - frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertTrue(frameDao.isFrameComplete(frame)); - - frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - frameDao.updateFrameState(frame, FrameState.WAITING); - assertFalse(frameDao.isFrameComplete(frame)); - } - - private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, - int red, int green, int blue) { - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder().setState(state) - .setText(text).setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(red) - .setGreen(green).setBlue(blue).build()) - .build(); - - return override; - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameOverride() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - // Create override - FrameStateDisplayOverride override = - createFrameStateDisplayOverride(FrameState.SUCCEEDED, "FINISHED", 200, 200, 123); - frameDao.setFrameStateDisplayOverride(frame.getFrameId(), override); - FrameStateDisplayOverrideSeq results = - frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - 
assertEquals(1, results.getOverridesCount()); - assertEquals(override, results.getOverridesList().get(0)); - - // Try to update override - FrameStateDisplayOverride overrideUpdate = - createFrameStateDisplayOverride(FrameState.SUCCEEDED, "DONE", 100, 100, 100); - frameDao.updateFrameStateDisplayOverride(frame.getFrameId(), overrideUpdate); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - assertEquals(overrideUpdate, results.getOverridesList().get(0)); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + FrameDao frameDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostDao hostDao; + + @Resource + ProcDao procDao; + + @Resource + HostManager hostManager; + + @Resource + DependManager dependManager; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameSearchFactory frameSearchFactory; + + private static final String HOST = "beta"; + + public DispatchHost createHost() { + return hostDao.findDispatchHost(HOST); + } + + @BeforeTransaction + public void create() { + + RenderHost host = RenderHost.newBuilder().setName(HOST).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(100) + .addAllTags(ImmutableList.of("mcore", "4core", "8g")).setState(HardwareState.UP) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host); + } + + @AfterTransaction + public void destroy() { + jdbcTemplate.update("DELETE FROM host WHERE str_name=?", HOST); + } + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameDetail() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + FrameDetail frame = frameDao.getFrameDetail(f); + frame = frameDao.getFrameDetail(f.getFrameId()); + assertEquals("0001-pass_1", frame.name); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrameDetail() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + assertEquals("0001-pass_1", frame.name); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrame() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + FrameInterface frame = frameDao.getFrame(f.getFrameId()); + assertEquals("0001-pass_1", frame.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameByLayer() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + FrameInterface f2 = frameDao.findFrame((LayerInterface) f, 1); + + assertEquals(f.getFrameId(), f2.getFrameId()); + assertEquals(f.getLayerId(), f2.getLayerId()); + assertEquals(f.getJobId(), f2.getJobId()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrame() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + assertEquals(f.getName(), "0001-pass_1"); + } + + @Test + 
@Transactional + @Rollback(true) + public void testFindFrames() { + JobDetail job = launchJob(); + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().addFrames("0001-pass_1").build()); + assertEquals(1, frameDao.findFrames(r).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrameDetails() { + JobDetail job = launchJob(); + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().addFrames("0001-pass_1").build()); + assertEquals(1, frameDao.findFrameDetails(r).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testgetOrphanedFrames() { + assertEquals(0, frameDao.getOrphanedFrames().size()); + + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + + /* + * Update the first frame to the orphan state, which is a frame that is in the running + * state, has no corresponding proc entry and has not been updated in the last 5 min. + */ + jdbcTemplate.update("UPDATE frame SET str_state = 'RUNNING', " + + "ts_updated = current_timestamp - interval '301' second WHERE pk_frame = ?", + f.getFrameId()); + + assertEquals(1, frameDao.getOrphanedFrames().size()); + assertTrue(frameDao.isOrphan(f)); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameState() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + assertTrue(frameDao.updateFrameState(f, FrameState.RUNNING)); + + assertEquals(FrameState.RUNNING.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, f.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testFailUpdateFrameState() { + JobDetail job = launchJob(); + FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); + + /** Change the version so the update fails **/ + jdbcTemplate.update("UPDATE frame SET int_version = int_version + 1 WHERE pk_frame=?", + + f.getFrameId()); + + assertEquals(false, frameDao.updateFrameState(f, FrameState.RUNNING)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameStarted() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + assertEquals(FrameState.WAITING, frame.state); + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + frameDao.updateFrameStarted(proc, fd); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameStopped() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); + + assertEquals("0001-pass_1_preprocess", frame.getName()); + assertEquals(FrameState.WAITING, frame.state); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = 
job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + frameDao.updateFrameStarted(proc, fd); + + try { + Thread.sleep(1001); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + DispatchFrame fd2 = frameDao.getDispatchFrame(frame.getId()); + assertTrue(frameDao.updateFrameStopped(fd2, FrameState.DEAD, 1, 1000l)); + + assertEquals(FrameState.DEAD.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameFixed() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); + + assertEquals("0001-pass_1_preprocess", frame.getName()); + assertEquals(FrameState.WAITING, frame.state); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + frameDao.updateFrameStarted(proc, fd); + + try { + Thread.sleep(1001); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + frameDao.updateFrameState(frame, FrameState.WAITING); + frameDao.updateFrameFixed(proc, frame); + + assertEquals(FrameState.RUNNING.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDispatchFrame() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); + assertEquals(dframe.id, frame.id); + } + + @Test + @Transactional + @Rollback(true) + public void testMarkFrameAsWaiting() { + JobDetail job = launchJob(); + + FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_depend_count FROM frame WHERE pk_frame=?", + Integer.class, f.getFrameId())); + + frameDao.markFrameAsWaiting(f); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_depend_count FROM frame WHERE pk_frame=?", + Integer.class, f.getFrameId())); + } + + @Test + @Transactional + @Rollback(true) + public void testMarkFrameAsDepend() { + JobDetail job = launchJob(); + + FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_depend_count FROM frame WHERE pk_frame=?", + Integer.class, f.getFrameId())); + + assertTrue(jdbcTemplate.queryForObject( + "SELECT b_active FROM depend WHERE 
pk_layer_depend_er=?", Boolean.class, + f.getLayerId())); + + frameDao.markFrameAsWaiting(f); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT int_depend_count FROM frame WHERE pk_frame=?", + Integer.class, f.getFrameId())); + + /* + * Need to grab new version of frame object once the state has changed. + */ + f = frameDao.findFrameDetail(job, "0001-pass_1"); + + frameDao.markFrameAsDepend(f); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_depend_count FROM frame WHERE pk_frame=?", + Integer.class, f.getFrameId())); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void testFindLongestFrame() { + JobDetail job = launchJob(); + frameDao.findLongestFrame(job); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void testFindShortestFrame() { + JobDetail job = launchJob(); + frameDao.findShortestFrame(job); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void findHighestMemoryFrame() { + JobDetail job = launchJob(); + frameDao.findHighestMemoryFrame(job); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void findLowestMemoryFrame() { + JobDetail job = launchJob(); + frameDao.findLowestMemoryFrame(job); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDependentFrames() { + JobDetail job = launchJob(); + FrameInterface frame_a = frameDao.findFrame(job, "0001-pass_1"); + FrameInterface frame_b = frameDao.findFrame(job, "0002-pass_1"); + + dependManager.createDepend(new FrameOnFrame(frame_a, frame_b)); + + assertEquals(1, + frameDao.getDependentFrames(dependManager.getWhatDependsOn(frame_b).get(0)).size(), + 1); + } + + @Test + @Transactional + @Rollback(true) + public void testGetResourceUsage() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); + frameDao.getResourceUsage(dframe); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameCleared() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = host.allocationId; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + + /* + * Only frames without active procs can be cleared. 
+ */ + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); + assertFalse(frameDao.updateFrameCleared(dframe)); + + dispatchSupport.unbookProc(proc); + assertTrue(frameDao.updateFrameCleared(dframe)); + + } + + @Test + @Transactional + @Rollback(true) + public void testGetStaleCheckpoints() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + assertEquals(0, frameDao.getStaleCheckpoints(300).size()); + jdbcTemplate.update("UPDATE frame SET str_state = ?, " + + "ts_stopped = current_timestamp - interval '400' second WHERE pk_frame = ?", + FrameState.CHECKPOINT.toString(), frame.getFrameId()); + assertEquals(1, frameDao.getStaleCheckpoints(300).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testSetCheckpointState() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + frameDao.updateFrameCheckpointState(frame, CheckpointState.ENABLED); + + String state = jdbcTemplate.queryForObject( + "SELECT str_checkpoint_state FROM frame WHERE pk_frame=?", String.class, + frame.getFrameId()); + + assertEquals(CheckpointState.ENABLED.toString(), state); + + /** + * To set a checkpoint complete the frame state must be in the checkpoint state. + */ + frameDao.updateFrameState(frame, FrameState.CHECKPOINT); + jdbcTemplate.update( + "UPDATE frame SET ts_started=current_timestamp, ts_stopped=current_timestamp + INTERVAL '20' second WHERE pk_frame=?", + frame.getFrameId()); + + assertTrue(frameDao.updateFrameCheckpointState(frame, CheckpointState.COMPLETE)); + Map result = jdbcTemplate.queryForMap( + "SELECT int_checkpoint_count FROM frame WHERE pk_frame=?", frame.getFrameId()); + + Integer checkPointCount = (Integer) result.get("int_checkpoint_count"); + assertEquals(1, checkPointCount.intValue()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsFrameComplete() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + frameDao.updateFrameState(frame, FrameState.EATEN); + assertTrue(frameDao.isFrameComplete(frame)); + + frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + assertTrue(frameDao.isFrameComplete(frame)); + + frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + frameDao.updateFrameState(frame, FrameState.WAITING); + assertFalse(frameDao.isFrameComplete(frame)); + } + + private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, + int red, int green, int blue) { + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder().setState(state) + .setText(text).setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(red) + .setGreen(green).setBlue(blue).build()) + .build(); + + return override; + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameOverride() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + // Create override + FrameStateDisplayOverride override = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "FINISHED", 200, 200, 123); + frameDao.setFrameStateDisplayOverride(frame.getFrameId(), override); + FrameStateDisplayOverrideSeq results = + frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, 
results.getOverridesCount()); + assertEquals(override, results.getOverridesList().get(0)); + + // Try to update override + FrameStateDisplayOverride overrideUpdate = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "DONE", 100, 100, 100); + frameDao.updateFrameStateDisplayOverride(frame.getFrameId(), overrideUpdate); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + assertEquals(overrideUpdate, results.getOverridesList().get(0)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java index 8db486696..c0c593660 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java @@ -50,399 +50,417 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class GroupDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - GroupDao groupDao; - - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Before - public void before() { - jobLauncher.testMode = true; - } - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - public GroupDetail createGroup() { - GroupDetail group = new GroupDetail(); - group.name = "Shit"; - group.parentId = groupDao.getRootGroupId(getShow()); - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group, groupDao.getRootGroupDetail(getShow())); - return group; - } - - public GroupDetail createSubGroup(GroupDetail parent) { - GroupDetail group = new GroupDetail(); - group.name = "SubShit"; - group.parentId = parent.id; - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group, groupDao.getGroup(parent.id)); - return group; - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroup() { - GroupDetail group = createGroup(); - GroupInterface g = groupDao.getGroup(group.id); - assertEquals(group.id, g.getGroupId()); - assertEquals(group.id, g.getId()); - assertEquals(group.name, g.getName()); - assertEquals(group.showId, g.getShowId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroups() { - GroupDetail group = createGroup(); - List l = new ArrayList(); - l.add(group.id); - List g = groupDao.getGroups(l); - assertEquals(1, g.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetRootGroupId() { - groupDao.getRootGroupId(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertGroup() { - GroupDetail group = createGroup(); - assertFalse(group.isNew()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertGroupAlternateMethod() { - GroupDetail group = new GroupDetail(); - group.name = "Shit"; - group.parentId = groupDao.getRootGroupId(getShow()); - group.showId = getShow().getId(); - group.deptId = 
departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteGroup() { - // Can't delete groups yet, will fail - GroupDetail group = createGroup(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_folder=?", Integer.class, group.getId())); - - groupDao.deleteGroup(group); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_folder=?", Integer.class, group.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateGroupParent() { - GroupDetail group = createGroup(); - GroupDetail subgroup = createSubGroup(group); - groupDao.updateGroupParent(subgroup, - groupDao.getGroupDetail(groupDao.getRootGroupId(getShow()))); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_level FROM folder_level WHERE pk_folder=?", Integer.class, subgroup.getId())); - - groupDao.updateGroupParent(subgroup, group); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_level FROM folder_level WHERE pk_folder=?", Integer.class, subgroup.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMaxCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxCores(group, 100); - assertEquals(Integer.valueOf(100), - jdbcTemplate.queryForObject("SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxCores(group, -1); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMinCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_min_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMinCores(group, 100); - assertEquals(Integer.valueOf(100), - jdbcTemplate.queryForObject("SELECT int_job_min_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMaxGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxGpus(group, 100); - assertEquals(Integer.valueOf(100), - jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxGpus(group, -1); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMinGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMinGpus(group, 100); - assertEquals(Integer.valueOf(100), - 
jdbcTemplate.queryForObject("SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobPriority() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_job_priority FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobPriority(group, 1000); - assertEquals(Integer.valueOf(1000), - jdbcTemplate.queryForObject("SELECT int_job_priority FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMinCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMinCores(group, 10); - assertEquals(Integer.valueOf(10), - jdbcTemplate.queryForObject("SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMaxCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxCores(group, 100); - assertEquals(Integer.valueOf(100), - jdbcTemplate.queryForObject("SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxCores(group, -5); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMinGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMinGpus(group, 10); - assertEquals(Integer.valueOf(10), - jdbcTemplate.queryForObject("SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMaxGpus() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxGpus(group, 100); - assertEquals(Integer.valueOf(100), - jdbcTemplate.queryForObject("SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxGpus(group, -5); - assertEquals(Integer.valueOf(-1), - jdbcTemplate.queryForObject("SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testIsManaged() { - GroupDetail group = createGroup(); - assertEquals(false, groupDao.isManaged(group)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateName() { - GroupDetail group = createGroup(); - groupDao.updateName(group, "NewName"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDepartment() { - GroupDetail group = createGroup(); - groupDao.updateDepartment(group, 
departmentDao.findDepartment("Lighting")); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroupDetail() { - GroupDetail group = createGroup(); - GroupDetail group2 = groupDao.getGroupDetail(group.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetChildrenRecursive() { - boolean is_test2 = false; - boolean is_test3 = false; - - GroupDetail g1 = new GroupDetail(); - g1.name = "Test1"; - g1.showId = getShow().getId(); - g1.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); - - GroupDetail g2 = new GroupDetail(); - g2.name = "Test2"; - g2.showId = getShow().getId(); - g2.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); - - for (GroupInterface g : groupDao - .getChildrenRecursive(groupDao.getGroup("A0000000-0000-0000-0000-000000000000"))) { - if (g.getName().equals("Test1")) { - is_test2 = true; - } - if (g.getName().equals("Test2")) { - is_test3 = true; - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + GroupDao groupDao; + + @Resource + ShowDao showDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Before + public void before() { + jobLauncher.testMode = true; + } + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + public GroupDetail createGroup() { + GroupDetail group = new GroupDetail(); + group.name = "Shit"; + group.parentId = groupDao.getRootGroupId(getShow()); + group.showId = getShow().getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(group, groupDao.getRootGroupDetail(getShow())); + return group; + } + + public GroupDetail createSubGroup(GroupDetail parent) { + GroupDetail group = new GroupDetail(); + group.name = "SubShit"; + group.parentId = parent.id; + group.showId = getShow().getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(group, groupDao.getGroup(parent.id)); + return group; + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroup() { + GroupDetail group = createGroup(); + GroupInterface g = groupDao.getGroup(group.id); + assertEquals(group.id, g.getGroupId()); + assertEquals(group.id, g.getId()); + assertEquals(group.name, g.getName()); + assertEquals(group.showId, g.getShowId()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroups() { + GroupDetail group = createGroup(); + List l = new ArrayList(); + l.add(group.id); + List g = groupDao.getGroups(l); + assertEquals(1, g.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetRootGroupId() { + groupDao.getRootGroupId(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertGroup() { + GroupDetail group = createGroup(); + assertFalse(group.isNew()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertGroupAlternateMethod() { + GroupDetail group = new GroupDetail(); + group.name = "Shit"; + group.parentId = groupDao.getRootGroupId(getShow()); + group.showId = getShow().getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + 
groupDao.insertGroup(group); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteGroup() { + // Can't delete groups yet, will fail + GroupDetail group = createGroup(); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM folder WHERE pk_folder=?", Integer.class, group.getId())); + + groupDao.deleteGroup(group); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM folder WHERE pk_folder=?", Integer.class, group.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateGroupParent() { + GroupDetail group = createGroup(); + GroupDetail subgroup = createSubGroup(group); + groupDao.updateGroupParent(subgroup, + groupDao.getGroupDetail(groupDao.getRootGroupId(getShow()))); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT int_level FROM folder_level WHERE pk_folder=?", + Integer.class, subgroup.getId())); + + groupDao.updateGroupParent(subgroup, group); + + assertEquals(Integer.valueOf(2), + jdbcTemplate.queryForObject("SELECT int_level FROM folder_level WHERE pk_folder=?", + Integer.class, subgroup.getId())); } - assertTrue(is_test2); - assertTrue(is_test3); - } - - @Test - @Transactional - @Rollback(true) - public void testGetChildren() { - boolean is_testuserA = false; - boolean is_testuserB = false; - - GroupDetail g1 = new GroupDetail(); - g1.name = "testuserA"; - g1.showId = getShow().getId(); - g1.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); - - GroupDetail g2 = new GroupDetail(); - g2.name = "testuserB"; - g2.showId = getShow().getId(); - g2.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); - - List groups = - groupDao.getChildren(groupDao.getGroup("A0000000-0000-0000-0000-000000000000")); - for (GroupInterface g : groups) { - if (g.getName().equals("testuserA")) { - is_testuserA = true; - } - if (g.getName().equals("testuserB")) { - is_testuserB = true; - } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMaxCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", Integer.class, + group.getGroupId())); + groupDao.updateDefaultJobMaxCores(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject( + "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", Integer.class, + group.getGroupId())); + groupDao.updateDefaultJobMaxCores(group, -1); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", Integer.class, + group.getGroupId())); + } - assertTrue(is_testuserA); - assertTrue(is_testuserB); - } - @Test - @Transactional - @Rollback(true) - public void testIsOverMinCores() { + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMinCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_job_min_cores FROM folder WHERE pk_folder=?", Integer.class, + group.getGroupId())); + groupDao.updateDefaultJobMinCores(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject( + "SELECT int_job_min_cores FROM folder WHERE pk_folder=?", Integer.class, + group.getGroupId())); + } - JobDetail job = launchJob(); - assertFalse(groupDao.isOverMinCores(job)); + @Test 
+ @Transactional + @Rollback(true) + public void testUpdateDefaultJobMaxGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxGpus(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxGpus(group, -1); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); - String groupid = jdbcTemplate.queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", - String.class, job.getJobId()); + } - // Now update some values so it returns true. - jdbcTemplate.update( - "UPDATE folder_resource SET int_cores = int_min_cores + 1 WHERE pk_folder=?", groupid); + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMinGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMinGpus(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobPriority() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject("SELECT int_job_priority FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobPriority(group, 1000); + assertEquals(Integer.valueOf(1000), + jdbcTemplate.queryForObject("SELECT int_job_priority FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } - assertTrue(groupDao.isOverMinCores(job)); - } + @Test + @Transactional + @Rollback(true) + public void testUpdateMinCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMinCores(group, 10); + assertEquals(Integer.valueOf(10), + jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMaxCores() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxCores(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxCores(group, -5); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMinGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", Integer.class, + group.getGroupId())); + 
groupDao.updateMinGpus(group, 10); + assertEquals(Integer.valueOf(10), + jdbcTemplate.queryForObject( + "SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", Integer.class, + group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMaxGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", Integer.class, + group.getGroupId())); + groupDao.updateMaxGpus(group, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject( + "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", Integer.class, + group.getGroupId())); + groupDao.updateMaxGpus(group, -5); + assertEquals(Integer.valueOf(-1), + jdbcTemplate.queryForObject( + "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", Integer.class, + group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testIsManaged() { + GroupDetail group = createGroup(); + assertEquals(false, groupDao.isManaged(group)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateName() { + GroupDetail group = createGroup(); + groupDao.updateName(group, "NewName"); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDepartment() { + GroupDetail group = createGroup(); + groupDao.updateDepartment(group, departmentDao.findDepartment("Lighting")); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroupDetail() { + GroupDetail group = createGroup(); + GroupDetail group2 = groupDao.getGroupDetail(group.id); + } + + @Test + @Transactional + @Rollback(true) + public void testGetChildrenRecursive() { + boolean is_test2 = false; + boolean is_test3 = false; + + GroupDetail g1 = new GroupDetail(); + g1.name = "Test1"; + g1.showId = getShow().getId(); + g1.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); + + GroupDetail g2 = new GroupDetail(); + g2.name = "Test2"; + g2.showId = getShow().getId(); + g2.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); + + for (GroupInterface g : groupDao + .getChildrenRecursive(groupDao.getGroup("A0000000-0000-0000-0000-000000000000"))) { + if (g.getName().equals("Test1")) { + is_test2 = true; + } + if (g.getName().equals("Test2")) { + is_test3 = true; + } + } + assertTrue(is_test2); + assertTrue(is_test3); + } + + @Test + @Transactional + @Rollback(true) + public void testGetChildren() { + boolean is_testuserA = false; + boolean is_testuserB = false; + + GroupDetail g1 = new GroupDetail(); + g1.name = "testuserA"; + g1.showId = getShow().getId(); + g1.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); + + GroupDetail g2 = new GroupDetail(); + g2.name = "testuserB"; + g2.showId = getShow().getId(); + g2.deptId = departmentDao.getDefaultDepartment().getId(); + groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); + + List groups = + groupDao.getChildren(groupDao.getGroup("A0000000-0000-0000-0000-000000000000")); + for (GroupInterface g : groups) { + if (g.getName().equals("testuserA")) { + is_testuserA = true; + } + if (g.getName().equals("testuserB")) { + is_testuserB = true; + } + } + assertTrue(is_testuserA); + assertTrue(is_testuserB); + } + + @Test + @Transactional + @Rollback(true) + public void testIsOverMinCores() { + + JobDetail job = 
launchJob(); + assertFalse(groupDao.isOverMinCores(job)); + + String groupid = jdbcTemplate.queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", + String.class, job.getJobId()); + + // Now update some values so it returns true. + jdbcTemplate.update( + "UPDATE folder_resource SET int_cores = int_min_cores + 1 WHERE pk_folder=?", + groupid); + + assertTrue(groupDao.isOverMinCores(job)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java index 30b8f1406..f7d296f3b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HistoricalDaoTests.java @@ -40,36 +40,36 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class HistoricalDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - private JobManager jobManager; + @Resource + private JobManager jobManager; - @Resource - private JobLauncher jobLauncher; + @Resource + private JobLauncher jobLauncher; - @Resource - private HistoricalDao historicalDao; + @Resource + private HistoricalDao historicalDao; - @Test - @Transactional - @Rollback(true) - public void testGetFinishedJobs() { - historicalDao.getFinishedJobs(24); - } + @Test + @Transactional + @Rollback(true) + public void testGetFinishedJobs() { + historicalDao.getFinishedJobs(24); + } - @Test - @Transactional - @Rollback(true) - public void testTransferJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail j = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.shutdownJob(j); - historicalDao.transferJob(j); + @Test + @Transactional + @Rollback(true) + public void testTransferJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail j = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.shutdownJob(j); + historicalDao.transferJob(j); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history WHERE pk_job=?", Integer.class, j.getJobId())); - } + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_history WHERE pk_job=?", Integer.class, j.getJobId())); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java index 5dca888c3..77f0b2799 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java @@ -57,490 +57,495 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class HostDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - private static final String TEST_HOST = "beta"; - - @Resource - protected AllocationDao allocationDao; - - @Resource - protected HostDao hostDao; - - @Resource - protected HostManager hostManager; - - @Resource - protected FacilityDao facilityDao; - - public HostDaoTests() {} - - // Hardcoded value of 
dispatcher.memory.mem_reserved_system - // to avoid having to read opencue.properties on a test setting - private final long MEM_RESERVED_SYSTEM = 524288; - - public static RenderHost buildRenderHost(String name) { - RenderHost host = RenderHost.newBuilder().setName(name).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap((int) CueUtil.MB512).setLoad(1) - .setNimbyEnabled(false).setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB2).setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(400) - .addAllTags(ImmutableList.of("linux", "64bit")).setState(HardwareState.UP) - .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512) - .build(); - - return host; - } - - @Test - public void testInit() {} - - @BeforeTransaction - public void clear() { - jdbcTemplate.update("DELETE FROM host WHERE str_name=?", TEST_HOST); - } - - @AfterTransaction - public void destroy() { - jdbcTemplate.update("DELETE FROM host WHERE str_name=?", TEST_HOST); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), jdbcTemplate - .queryForObject("SELECT int_mem FROM host WHERE str_name=?", Long.class, TEST_HOST)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN1() { - String TEST_HOST_NEW = "ice-ns1.yvr"; - String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), hostManager.getDefaultAllocationDetail(), - true); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN2() { - String TEST_HOST_NEW = "compile21"; - String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN3() { - String TEST_HOST_NEW = "hostname"; - String FQDN_HOST = TEST_HOST_NEW + ".fake.co.uk"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN4() { - String TEST_HOST_NEW = "10.0.1.18"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - 
@Transactional - @Rollback(true) - public void testInsertHostIPv61() { - String TEST_HOST_NEW = "::1"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostIPv62() { - String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostIPv63() { - String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:192.168.100.180"; - hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), - hostManager.getDefaultAllocationDetail(), false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostAlternateOS() { - - RenderHost host = - buildRenderHost(TEST_HOST).toBuilder().putAttributes("SP_OS", "spinux1").build(); - - hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); - - assertEquals("spinux1", - jdbcTemplate.queryForObject("SELECT str_os FROM host_stat, host " - + "WHERE host.pk_host = host_stat.pk_host " + "AND host.str_name=?", String.class, - TEST_HOST), - "spinux1"); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostDesktop() { - - RenderHost host = buildRenderHost(TEST_HOST); - hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); - - assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), jdbcTemplate - .queryForObject("SELECT int_mem FROM host WHERE str_name=?", Long.class, TEST_HOST)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateThreadMode() { - - RenderHost host = buildRenderHost(TEST_HOST); - host.toBuilder().setNimbyEnabled(true).build(); - hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); - - HostEntity d = hostDao.findHostDetail(TEST_HOST); - hostDao.updateThreadMode(d, ThreadMode.AUTO); - - assertEquals(Integer.valueOf(ThreadMode.AUTO_VALUE), jdbcTemplate - .queryForObject("SELECT int_thread_mode FROM host WHERE pk_host=?", Integer.class, d.id)); - - hostDao.updateThreadMode(d, ThreadMode.ALL); - - assertEquals(Integer.valueOf(ThreadMode.ALL_VALUE), jdbcTemplate - .queryForObject("SELECT int_thread_mode FROM host WHERE pk_host=?", Integer.class, d.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHostDetail() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - hostDao.getHostDetail(host); - hostDao.getHostDetail(host.getHostId()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsHostLocked() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(hostDao.isHostLocked(host), false); - - hostDao.updateHostLock(host, LockState.LOCKED, new Source("TEST")); - assertEquals(hostDao.isHostLocked(host), true); - } - - @Test - @Transactional - @Rollback(true) - public void 
testIsHostUp() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - assertTrue(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); - - hostDao.updateHostState(hostDao.findHostDetail(TEST_HOST), HardwareState.DOWN); - assertFalse(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); - } - - @Test - @Transactional - @Rollback(true) - public void testHostExists() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(hostDao.hostExists(TEST_HOST), true); - assertEquals(hostDao.hostExists("frickjack"), false); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(hostDao.hostExists(TEST_HOST), true); - hostDao.deleteHost(host); - assertEquals(hostDao.hostExists(TEST_HOST), false); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteDownHosts() { - for (int i = 0; i < 3; i++) { - String name = TEST_HOST + i; - hostDao.insertRenderHost(buildRenderHost(name), hostManager.getDefaultAllocationDetail(), - false); - if (i != 1) { - HostEntity host = hostDao.findHostDetail(name); - assertEquals(name, host.name); - hostDao.updateHostState(host, HardwareState.DOWN); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + private static final String TEST_HOST = "beta"; + + @Resource + protected AllocationDao allocationDao; + + @Resource + protected HostDao hostDao; + + @Resource + protected HostManager hostManager; + + @Resource + protected FacilityDao facilityDao; + + public HostDaoTests() {} + + // Hardcoded value of dispatcher.memory.mem_reserved_system + // to avoid having to read opencue.properties on a test setting + private final long MEM_RESERVED_SYSTEM = 524288; + + public static RenderHost buildRenderHost(String name) { + RenderHost host = RenderHost.newBuilder().setName(name).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap((int) CueUtil.MB512) + .setLoad(1).setNimbyEnabled(false).setTotalMcp(CueUtil.GB4) + .setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(400) + .addAllTags(ImmutableList.of("linux", "64bit")).setState(HardwareState.UP) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + return host; + } + + @Test + public void testInit() {} + + @BeforeTransaction + public void clear() { + jdbcTemplate.update("DELETE FROM host WHERE str_name=?", TEST_HOST); } - hostDao.deleteDownHosts(); + @AfterTransaction + public void destroy() { + jdbcTemplate.update("DELETE FROM host WHERE str_name=?", TEST_HOST); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHost() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), + jdbcTemplate.queryForObject("SELECT int_mem FROM host WHERE str_name=?", Long.class, + TEST_HOST)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN1() { + String TEST_HOST_NEW = "ice-ns1.yvr"; + String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; + hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), + hostManager.getDefaultAllocationDetail(), true); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + + HostInterface host = hostDao.findHost(FQDN_HOST); + HostEntity hostDetail2 = hostDao.getHostDetail(host); + assertEquals(TEST_HOST_NEW, hostDetail2.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN2() { + String TEST_HOST_NEW = "compile21"; + String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; + hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + + HostInterface host = hostDao.findHost(FQDN_HOST); + HostEntity hostDetail2 = hostDao.getHostDetail(host); + assertEquals(TEST_HOST_NEW, hostDetail2.name); + + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN3() { + String TEST_HOST_NEW = "hostname"; + String FQDN_HOST = TEST_HOST_NEW + ".fake.co.uk"; + hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + + HostInterface host = hostDao.findHost(FQDN_HOST); + HostEntity hostDetail2 = hostDao.getHostDetail(host); + assertEquals(TEST_HOST_NEW, hostDetail2.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostFQDN4() { + String TEST_HOST_NEW = "10.0.1.18"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostIPv61() { + String TEST_HOST_NEW = "::1"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + 
@Transactional + @Rollback(true) + public void testInsertHostIPv62() { + String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:ABCD"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostIPv63() { + String TEST_HOST_NEW = "ABCD:ABCD:ABCD:ABCD:ABCD:ABCD:192.168.100.180"; + hostDao.insertRenderHost(buildRenderHost(TEST_HOST_NEW), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); + assertEquals(TEST_HOST_NEW, hostDetail.name); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostAlternateOS() { + + RenderHost host = + buildRenderHost(TEST_HOST).toBuilder().putAttributes("SP_OS", "spinux1").build(); + + hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); + + assertEquals("spinux1", + jdbcTemplate.queryForObject( + "SELECT str_os FROM host_stat, host " + + "WHERE host.pk_host = host_stat.pk_host " + "AND host.str_name=?", + String.class, TEST_HOST), + "spinux1"); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertHostDesktop() { + + RenderHost host = buildRenderHost(TEST_HOST); + hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); + + assertEquals(Long.valueOf(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM), + jdbcTemplate.queryForObject("SELECT int_mem FROM host WHERE str_name=?", Long.class, + TEST_HOST)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateThreadMode() { + + RenderHost host = buildRenderHost(TEST_HOST); + host.toBuilder().setNimbyEnabled(true).build(); + hostDao.insertRenderHost(host, hostManager.getDefaultAllocationDetail(), false); + + HostEntity d = hostDao.findHostDetail(TEST_HOST); + hostDao.updateThreadMode(d, ThreadMode.AUTO); + + assertEquals(Integer.valueOf(ThreadMode.AUTO_VALUE), jdbcTemplate.queryForObject( + "SELECT int_thread_mode FROM host WHERE pk_host=?", Integer.class, d.id)); + + hostDao.updateThreadMode(d, ThreadMode.ALL); + + assertEquals(Integer.valueOf(ThreadMode.ALL_VALUE), jdbcTemplate.queryForObject( + "SELECT int_thread_mode FROM host WHERE pk_host=?", Integer.class, d.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetHostDetail() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + hostDao.getHostDetail(host); + hostDao.getHostDetail(host.getHostId()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsHostLocked() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + assertEquals(hostDao.isHostLocked(host), false); + + hostDao.updateHostLock(host, LockState.LOCKED, new Source("TEST")); + assertEquals(hostDao.isHostLocked(host), true); + } + + @Test + @Transactional + @Rollback(true) + public void testIsHostUp() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + assertTrue(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); + + hostDao.updateHostState(hostDao.findHostDetail(TEST_HOST), HardwareState.DOWN); + assertFalse(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); + } + + 
@Test + @Transactional + @Rollback(true) + public void testHostExists() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + assertEquals(hostDao.hostExists(TEST_HOST), true); + assertEquals(hostDao.hostExists("frickjack"), false); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteHost() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + assertEquals(hostDao.hostExists(TEST_HOST), true); + hostDao.deleteHost(host); + assertEquals(hostDao.hostExists(TEST_HOST), false); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteDownHosts() { + for (int i = 0; i < 3; i++) { + String name = TEST_HOST + i; + hostDao.insertRenderHost(buildRenderHost(name), + hostManager.getDefaultAllocationDetail(), false); + if (i != 1) { + HostEntity host = hostDao.findHostDetail(name); + assertEquals(name, host.name); + hostDao.updateHostState(host, HardwareState.DOWN); + } + } + + hostDao.deleteDownHosts(); + + for (int i = 0; i < 3; i++) { + String name = TEST_HOST + i; + assertEquals(hostDao.hostExists(name), i == 1); + } + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostRebootWhenIdle() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity host = hostDao.findHostDetail(TEST_HOST); + assertFalse(jdbcTemplate.queryForObject("SELECT b_reboot_idle FROM host WHERE pk_host=?", + Boolean.class, host.getHostId())); + hostDao.updateHostRebootWhenIdle(host, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_reboot_idle FROM host WHERE pk_host=?", + Boolean.class, host.getHostId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateHostStats() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); + hostDao.updateHostStats(dispatchHost, CueUtil.GB8, CueUtil.GB8, CueUtil.GB8, CueUtil.GB8, + CueUtil.GB8, CueUtil.GB8, 1, 1, 100, new Timestamp(1247526000 * 1000l), "spinux1"); + + Map result = jdbcTemplate + .queryForMap("SELECT * FROM host_stat WHERE pk_host=?", dispatchHost.getHostId()); + + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mem_total"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mem_free"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_swap_total"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_swap_free"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mcp_total"))).longValue()); + assertEquals(CueUtil.GB8, ((Long) (result.get("int_mcp_free"))).longValue()); + assertEquals(100, ((Long) (result.get("int_load"))).intValue()); + assertEquals(new Timestamp(1247526000 * 1000l), (Timestamp) result.get("ts_booted")); + + } + + @Test + @Transactional + @Rollback(true) + public void updateHostResources() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); + HostReport report = HostReport.newBuilder().setHost(buildRenderHost(TEST_HOST).toBuilder() + .setCoresPerProc(1200).setNumProcs(2).setTotalMem((int) CueUtil.GB32)).build(); + hostDao.updateHostResources(dispatchHost, report); + + // Verify what the original values are + assertEquals(800, 
dispatchHost.cores); + assertEquals(800, dispatchHost.idleCores); + assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, dispatchHost.idleMemory); + assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, dispatchHost.memory); + + dispatchHost = hostDao.findDispatchHost(TEST_HOST); + + // Now verify they've changed. + assertEquals(2400, dispatchHost.cores); + assertEquals(2400, dispatchHost.idleCores); + assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, dispatchHost.idleMemory); + assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, dispatchHost.memory); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDispatchHost() { + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); + DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); + + assertEquals(dispatchHost.name, TEST_HOST); + assertEquals(dispatchHost.allocationId, hostDetail.getAllocationId()); + assertEquals(dispatchHost.id, hostDetail.getHostId()); + assertEquals(dispatchHost.cores, hostDetail.cores); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostSetAllocation() { + + hostDao.insertRenderHost(buildRenderHost(TEST_HOST), + hostManager.getDefaultAllocationDetail(), false); + + HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); + + hostDao.updateHostSetAllocation(hostDetail, hostManager.getDefaultAllocationDetail()); + + hostDetail = hostDao.findHostDetail(TEST_HOST); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostSetManualTags() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + + hostDao.tagHost(host, "frick", HostTagType.MANUAL); + hostDao.tagHost(host, "jack", HostTagType.MANUAL); + hostDao.recalcuateTags(host.id); + + String tag = jdbcTemplate.queryForObject("SELECT str_tags FROM host WHERE pk_host=?", + String.class, host.id); + assertEquals("unassigned beta 64bit frick jack linux", tag); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateHostSetOS() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + hostDao.updateHostOs(host, "foo"); + String tag = jdbcTemplate.queryForObject("SELECT str_os FROM host_stat WHERE pk_host=?", + String.class, host.id); + assertEquals("foo", tag); + } + + @Test + @Transactional + @Rollback(true) + public void testChangeTags() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + + String tag = jdbcTemplate.queryForObject("SELECT str_tags FROM host WHERE pk_host=?", + String.class, host.id); + assertEquals("unassigned beta 64bit linux", tag); + + hostDao.removeTag(host, "linux"); + hostDao.recalcuateTags(host.id); + + assertEquals("unassigned beta 64bit", jdbcTemplate.queryForObject( + "SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id)); + + hostDao.tagHost(host, "32bit", HostTagType.MANUAL); + hostDao.recalcuateTags(host.id); + + assertEquals("unassigned beta 32bit 64bit", jdbcTemplate.queryForObject( + "SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetStrandedCoreUnits() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + + jdbcTemplate.update("UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", CueUtil.GB, + host.getHostId()); + + assertEquals(host.idleCores, hostDao.getStrandedCoreUnits(host)); + + jdbcTemplate.update("UPDATE host SET int_mem_idle = ? 
WHERE pk_host = ?", CueUtil.GB2, + host.getHostId()); + + assertEquals(0, hostDao.getStrandedCoreUnits(host)); + + // Check to see if fractional cores is rounded to the lowest + // whole core properly. + jdbcTemplate.update( + "UPDATE host SET int_cores_idle=150, int_mem_idle = ? WHERE pk_host = ?", + CueUtil.GB, host.getHostId()); + + assertEquals(100, hostDao.getStrandedCoreUnits(host)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsPreferShow() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + assertFalse(hostDao.isPreferShow(host)); + } - for (int i = 0; i < 3; i++) { - String name = TEST_HOST + i; - assertEquals(hostDao.hostExists(name), i == 1); + @Test + @Transactional + @Rollback(true) + public void testIsNimby() { + DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); + assertFalse(hostDao.isNimbyHost(host)); } - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostRebootWhenIdle() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertFalse(jdbcTemplate.queryForObject("SELECT b_reboot_idle FROM host WHERE pk_host=?", - Boolean.class, host.getHostId())); - hostDao.updateHostRebootWhenIdle(host, true); - assertTrue(jdbcTemplate.queryForObject("SELECT b_reboot_idle FROM host WHERE pk_host=?", - Boolean.class, host.getHostId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateHostStats() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - hostDao.updateHostStats(dispatchHost, CueUtil.GB8, CueUtil.GB8, CueUtil.GB8, CueUtil.GB8, - CueUtil.GB8, CueUtil.GB8, 1, 1, 100, new Timestamp(1247526000 * 1000l), "spinux1"); - - Map result = jdbcTemplate.queryForMap("SELECT * FROM host_stat WHERE pk_host=?", - dispatchHost.getHostId()); - - assertEquals(CueUtil.GB8, ((Long) (result.get("int_mem_total"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) (result.get("int_mem_free"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) (result.get("int_swap_total"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) (result.get("int_swap_free"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) (result.get("int_mcp_total"))).longValue()); - assertEquals(CueUtil.GB8, ((Long) (result.get("int_mcp_free"))).longValue()); - assertEquals(100, ((Long) (result.get("int_load"))).intValue()); - assertEquals(new Timestamp(1247526000 * 1000l), (Timestamp) result.get("ts_booted")); - - } - - @Test - @Transactional - @Rollback(true) - public void updateHostResources() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - HostReport report = HostReport.newBuilder().setHost(buildRenderHost(TEST_HOST).toBuilder() - .setCoresPerProc(1200).setNumProcs(2).setTotalMem((int) CueUtil.GB32)).build(); - hostDao.updateHostResources(dispatchHost, report); - - // Verify what the original values are - assertEquals(800, dispatchHost.cores); - assertEquals(800, dispatchHost.idleCores); - assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, dispatchHost.idleMemory); - assertEquals(CueUtil.GB16 - this.MEM_RESERVED_SYSTEM, dispatchHost.memory); - - dispatchHost = hostDao.findDispatchHost(TEST_HOST); - - // Now verify they've changed. 
- assertEquals(2400, dispatchHost.cores); - assertEquals(2400, dispatchHost.idleCores); - assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, dispatchHost.idleMemory); - assertEquals(CueUtil.GB32 - this.MEM_RESERVED_SYSTEM, dispatchHost.memory); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - - assertEquals(dispatchHost.name, TEST_HOST); - assertEquals(dispatchHost.allocationId, hostDetail.getAllocationId()); - assertEquals(dispatchHost.id, hostDetail.getHostId()); - assertEquals(dispatchHost.cores, hostDetail.cores); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetAllocation() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); - - hostDao.updateHostSetAllocation(hostDetail, hostManager.getDefaultAllocationDetail()); - - hostDetail = hostDao.findHostDetail(TEST_HOST); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetManualTags() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - hostDao.tagHost(host, "frick", HostTagType.MANUAL); - hostDao.tagHost(host, "jack", HostTagType.MANUAL); - hostDao.recalcuateTags(host.id); - - String tag = jdbcTemplate.queryForObject("SELECT str_tags FROM host WHERE pk_host=?", - String.class, host.id); - assertEquals("unassigned beta 64bit frick jack linux", tag); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetOS() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - hostDao.updateHostOs(host, "foo"); - String tag = jdbcTemplate.queryForObject("SELECT str_os FROM host_stat WHERE pk_host=?", - String.class, host.id); - assertEquals("foo", tag); - } - - @Test - @Transactional - @Rollback(true) - public void testChangeTags() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - String tag = jdbcTemplate.queryForObject("SELECT str_tags FROM host WHERE pk_host=?", - String.class, host.id); - assertEquals("unassigned beta 64bit linux", tag); - - hostDao.removeTag(host, "linux"); - hostDao.recalcuateTags(host.id); - - assertEquals("unassigned beta 64bit", jdbcTemplate - .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id)); - - hostDao.tagHost(host, "32bit", HostTagType.MANUAL); - hostDao.recalcuateTags(host.id); - - assertEquals("unassigned beta 32bit 64bit", jdbcTemplate - .queryForObject("SELECT str_tags FROM host WHERE pk_host=?", String.class, host.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetStrandedCoreUnits() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - jdbcTemplate.update("UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", CueUtil.GB, - host.getHostId()); - - assertEquals(host.idleCores, hostDao.getStrandedCoreUnits(host)); - - jdbcTemplate.update("UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", CueUtil.GB2, - host.getHostId()); - - assertEquals(0, hostDao.getStrandedCoreUnits(host)); - - // Check to see if fractional cores is rounded to the lowest - // whole core properly. - jdbcTemplate.update("UPDATE host SET int_cores_idle=150, int_mem_idle = ? 
WHERE pk_host = ?", - CueUtil.GB, host.getHostId()); - - assertEquals(100, hostDao.getStrandedCoreUnits(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsPreferShow() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - assertFalse(hostDao.isPreferShow(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsNimby() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - assertFalse(hostDao.isNimbyHost(host)); - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java index 9d8c9a89b..7c93a3b5a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java @@ -64,633 +64,652 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobDao jobDao; - - @Resource - PointDao pointDao; - - @Resource - ShowDao showDao; - - @Resource - TaskDao taskDao; - - @Resource - GroupDao groupDao; - - @Resource - FacilityDao facilityDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - JobLogUtil jobLogUtil; - - private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; - private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; - private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public JobDetail buildJobDetail() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return spec.getJobs().get(0).detail; - } - - public JobDetail insertJob() { - JobDetail job = this.buildJobDetail(); - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.showName = "pipe"; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.deptName = departmentDao.getDefaultDepartment().getName(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - job.facilityName = facilityDao.getDefaultFacility().getName(); - job.state = JobState.PENDING; - job.maxCoreUnits = 10000; - jobDao.insertJob(job, jobLogUtil); - return job; - } - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchJob() { - JobDetail job = insertJob(); - DispatchJob djob = jobDao.getDispatchJob(job.id); - assertEquals(djob.id, job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobComplete() { - JobDetail job = insertJob(); - // returns true because there are no dispatchable frames - assertEquals(true, jobDao.isJobComplete(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJob() { - JobDetail job = this.buildJobDetail(); - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - 
jobDao.insertJob(job, jobLogUtil); - assertNotNull(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindJob() { - JobDetail job = insertJob(); - JobInterface j1 = jobDao.findJob(job.name); - JobDetail j2 = jobDao.findJobDetail(job.name); - assertEquals(job.name, j1.getName()); - assertEquals(job.name, j2.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJob() { - JobDetail job = insertJob(); - jobDao.getJobDetail(job.id); - jobDao.getJob(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobDetail() { - JobDetail src = insertJob(); - JobDetail job = jobDao.getJobDetail(src.id); - assertEquals(job.id, src.id); - assertEquals(job.name, src.name); - assertEquals(job.showId, src.showId); - assertEquals(job.facilityId, src.facilityId); - assertEquals(job.groupId, src.groupId); - assertEquals(job.deptId, src.deptId); - assertEquals(job.state, src.state); - assertEquals(job.shot, src.shot); - assertEquals(job.user, src.user); - assertEquals(job.email, src.email); - assertEquals(job.uid, src.uid); - assertEquals(job.logDir, src.logDir); - assertEquals(job.isPaused, src.isPaused); - assertEquals(job.isAutoEat, src.isAutoEat); - assertEquals(job.totalFrames, src.totalFrames); - assertEquals(job.totalLayers, src.totalLayers); - assertEquals(job.startTime, src.startTime); - assertEquals(job.stopTime, src.stopTime); - assertEquals(job.maxRetries, src.maxRetries); - assertEquals(job.os, src.os); - assertEquals(job.facilityName, src.facilityName); - assertEquals(job.deptName, src.deptName); - assertEquals(job.showName, src.showName); - assertEquals(job.priority, src.priority); - assertEquals(job.minCoreUnits, src.minCoreUnits); - assertEquals(job.maxCoreUnits, src.maxCoreUnits); - assertEquals(job.isLocal, src.isLocal); - assertEquals(job.localHostName, src.localHostName); - assertEquals(job.localMaxCores, src.localMaxCores); - assertEquals(job.localMaxMemory, src.localMaxMemory); - assertEquals(job.localThreadNumber, src.localThreadNumber); - assertEquals(job.localMaxGpus, src.localMaxGpus); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobsByTask() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - jobDao.getJobs(t); - } - - @Test - @Transactional - @Rollback(true) - public void testJobExists() { - assertFalse(jobDao.exists(JOB_NAME)); - JobDetail job = insertJob(); - jdbcTemplate.update("UPDATE job SET str_state='PENDING' WHERE pk_job=?", job.id); - assertTrue(jobDao.exists(JOB_NAME)); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteJob() { - jobDao.deleteJob(insertJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testActivateJob() { - jobDao.activateJob(insertJob(), JobState.PENDING); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobState() { - JobDetail job = insertJob(); - assertEquals(JobState.PENDING, job.state); - jobDao.updateState(job, JobState.FINISHED); - assertEquals(JobState.FINISHED.toString(), jdbcTemplate - .queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobFinished() { - jobDao.updateJobFinished(insertJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobOverMinProc() { - JobDetail job = insertJob(); - 
assertFalse(jobDao.isOverMinCores(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testHasPendingFrames() { - assertFalse(jobDao.hasPendingFrames(insertJob())); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobOverMaxProc() { - JobDetail job = insertJob(); - assertFalse(jobDao.isOverMaxCores(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobAtMaxCores() { - JobDetail job = insertJob(); - assertFalse(jobDao.isAtMaxCores(job)); - - jdbcTemplate.update("UPDATE job_resource SET int_cores = int_max_cores WHERE pk_job=?", - job.getJobId()); - - assertTrue(jobDao.isAtMaxCores(job)); - - } - - @Test - @Transactional - @Rollback(true) - public void testIsOverMaxCores() { - JobDetail job = insertJob(); - jobDao.updateMaxCores(job, 500); - jdbcTemplate.update("UPDATE job_resource SET int_cores = 450 WHERE pk_job=?", job.getJobId()); - - assertFalse(jobDao.isOverMaxCores(job)); - assertFalse(jobDao.isOverMaxCores(job, 50)); - assertTrue(jobDao.isOverMaxCores(job, 100)); - - jdbcTemplate.update("UPDATE job_resource SET int_max_cores = 200 WHERE pk_job=?", - job.getJobId()); - assertTrue(jobDao.isOverMaxCores(job)); - } - - @Test(expected = org.springframework.jdbc.UncategorizedSQLException.class) - @Transactional - @Rollback(true) - public void testMaxCoreTrigger() { - JobDetail job = insertJob(); - int maxCores = jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId()); - - jdbcTemplate.update("UPDATE job_resource SET int_cores = ? WHERE pk_job=?", maxCores + 1, - job.getJobId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPriority() { - JobDetail job = insertJob(); - jobDao.updatePriority(job, 199); - assertEquals(Integer.valueOf(199), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMinCores() { - JobDetail job = insertJob(); - jobDao.updateMinCores(job, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxCores() { - JobDetail job = insertJob(); - jobDao.updateMaxCores(job, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMinCoresByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updateMinCores(g, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxCoresByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updateMaxCores(g, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPriorityByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updatePriority(g, 100); - 
assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxRss() { - long maxRss = 100000; - JobDetail job = insertJob(); - jobDao.updateMaxRSS(job, maxRss); - assertEquals(Long.valueOf(maxRss), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM job_mem WHERE pk_job=?", Long.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPaused() { - JobDetail job = insertJob(); - - assertTrue(jdbcTemplate.queryForObject("SELECT b_paused FROM job WHERE pk_job=?", Boolean.class, - job.getJobId())); - - jobDao.updatePaused(job, false); - - assertFalse(jdbcTemplate.queryForObject("SELECT b_paused FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobAutoEat() { - JobDetail job = insertJob(); - - assertFalse(jdbcTemplate.queryForObject("SELECT b_autoeat FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - - jobDao.updateAutoEat(job, true); - - assertTrue(jdbcTemplate.queryForObject("SELECT b_autoeat FROM job WHERE pk_job=?", - Boolean.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetries() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job, 10); - assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( - "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, job.getJobId())); - } - - @Test(expected = IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetriesTooLow() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job, -1); - } - - @Test(expected = IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetriesTooHigh() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job, 100000); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameStateTotals() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - jobDao.getFrameStateTotals(spec.getJobs().get(0).detail); - } - - @Test - @Transactional - @Rollback(true) - public void testGetExecutionSummary() { - JobDetail job = launchJob(); - ExecutionSummary summary = jobDao.getExecutionSummary(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobEnvironment() { - JobDetail job = launchJob(); - Map map = jobDao.getEnvironment(job); - for (Map.Entry e : map.entrySet()) { - assertEquals("VNP_VCR_SESSION", e.getKey()); - assertEquals("9000", e.getValue()); - } - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobEnvironment() { - JobDetail job = launchJob(); - jobDao.insertEnvironment(job, "CHAMBERS", "123"); - Map map = jobDao.getEnvironment(job); - assertEquals(2, map.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobEnvironmentMap() { - JobDetail job = launchJob(); - Map map = new HashMap(); - map.put("CHAMBERS", "123"); - map.put("OVER9000", "123"); - - jobDao.insertEnvironment(job, map); - Map env = jobDao.getEnvironment(job); - assertEquals(3, env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLastJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - 
JobInterface job = spec.getJobs().get(0).detail; - jobDao.getFrameStateTotals(job); - jobManager.shutdownJob(job); - // this might fail - JobDetail oldJob = jobDao.findLastJob(job.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobLogPath() { - JobDetail job = launchJob(); - String newLogDir = "/path/to/nowhere"; - jobDao.updateLogPath(job, newLogDir); - assertEquals(newLogDir, jdbcTemplate - .queryForObject("SELECT str_log_dir FROM job WHERE pk_job=?", String.class, job.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobParent() { - JobDetail job = launchJob(); - - // Make a new test group. - GroupDetail root = groupDao.getRootGroupDetail(job); - - GroupDetail testGroup = new GroupDetail(); - testGroup.name = "testGroup"; - testGroup.deptId = departmentDao.getDefaultDepartment().getId(); - testGroup.showId = root.getShowId(); - - groupDao.insertGroup(testGroup, root); - - jdbcTemplate.update( - "UPDATE folder SET int_job_max_cores=-1, int_job_min_cores=-1, int_job_priority=-1 WHERE pk_folder=?", - testGroup.getId()); - - GroupDetail group = groupDao.getGroupDetail(testGroup.getId()); - jobDao.updateParent(job, group); - - assertEquals(-1, group.jobMaxCores); - assertEquals(-1, group.jobMinCores); - assertEquals(-1, group.jobPriority); - - assertEquals(group.getGroupId(), jdbcTemplate - .queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", String.class, job.id)); - - assertEquals(group.getDepartmentId(), jdbcTemplate - .queryForObject("SELECT pk_dept FROM job WHERE pk_job=?", String.class, job.id)); - - group.jobMaxCores = 100; - group.jobMinCores = 100; - group.jobPriority = 100; - - jobDao.updateParent(job, group); - - assertEquals(Integer.valueOf(group.jobMaxCores), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.id)); - - assertEquals(Integer.valueOf(group.jobMinCores), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.id)); - - assertEquals(Integer.valueOf(group.jobPriority), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testCueHasPendingJobs() { - jobDao.cueHasPendingJobs(new FacilityEntity("0")); - - } - - @Test - @Transactional - @Rollback(true) - public void mapPostJob() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - final String pk_job = spec.getJobs().get(0).detail.id; - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM job_post WHERE pk_job=?", Integer.class, pk_job)); - } - - @Test - @Transactional - @Rollback(true) - public void activatePostJob() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - jobDao.activatePostJob(spec.getJobs().get(0).detail); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject("SELECT str_state FROM job WHERE pk_job=?", String.class, - spec.getJobs().get(0).getPostJob().detail.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateUsage() { - - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - JobInterface job = jobDao.findJob(spec.getJobs().get(0).detail.name); - - /** 60 seconds of 100 core units **/ - 
ResourceUsage usage = new ResourceUsage(60, 33, 0); - - assertTrue(usage.getClockTimeSeconds() > 0); - assertTrue(usage.getCoreTimeSeconds() > 0); - - /** - * Successful frame - */ - jobDao.updateUsage(job, usage, 0); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM job_usage WHERE pk_job=?", Long.class, job.getId())); + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM job_usage WHERE pk_job=?", Long.class, job.getId())); - - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject("SELECT int_frame_success_count FROM job_usage WHERE pk_job=?", - Integer.class, job.getId())); - - /** - * Failed frame - */ - jobDao.updateUsage(job, usage, 1); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM job_usage WHERE pk_job=?", Long.class, job.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM job_usage WHERE pk_job=?", Long.class, job.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM job_usage WHERE pk_job=?", Integer.class, job.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testOverrideMaxCoresAndGpus() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/override_max_cores_gpus.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_test"); - assertEquals(job.maxCoreUnits, 42000); - assertEquals(job.maxGpuUnits, 42); - } + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobDao jobDao; + + @Resource + PointDao pointDao; + + @Resource + ShowDao showDao; + + @Resource + TaskDao taskDao; + + @Resource + GroupDao groupDao; + + @Resource + FacilityDao facilityDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + JobLogUtil jobLogUtil; + + private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; + private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; + private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; + + @Before + public void testMode() { + jobLauncher.testMode = true; + } + + public JobDetail buildJobDetail() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return spec.getJobs().get(0).detail; + } + + public JobDetail insertJob() { + JobDetail job = this.buildJobDetail(); + job.groupId = ROOT_FOLDER; + job.showId = ROOT_SHOW; + job.showName = "pipe"; + job.logDir = jobLogUtil.getJobLogPath(job); + job.deptId = departmentDao.getDefaultDepartment().getId(); + job.deptName = departmentDao.getDefaultDepartment().getName(); + job.facilityId = facilityDao.getDefaultFacility().getId(); + job.facilityName = facilityDao.getDefaultFacility().getName(); + job.state = JobState.PENDING; + job.maxCoreUnits = 10000; + jobDao.insertJob(job, jobLogUtil); + return job; + } + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDispatchJob() { + JobDetail job = insertJob(); + DispatchJob djob = jobDao.getDispatchJob(job.id); + assertEquals(djob.id, job.id); + } + + @Test + 
@Transactional + @Rollback(true) + public void testIsJobComplete() { + JobDetail job = insertJob(); + // returns true because there are no dispatchable frames + assertEquals(true, jobDao.isJobComplete(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJob() { + JobDetail job = this.buildJobDetail(); + job.groupId = ROOT_FOLDER; + job.showId = ROOT_SHOW; + job.logDir = jobLogUtil.getJobLogPath(job); + job.deptId = departmentDao.getDefaultDepartment().getId(); + job.facilityId = facilityDao.getDefaultFacility().getId(); + jobDao.insertJob(job, jobLogUtil); + assertNotNull(job.id); + } + + @Test + @Transactional + @Rollback(true) + public void testFindJob() { + JobDetail job = insertJob(); + JobInterface j1 = jobDao.findJob(job.name); + JobDetail j2 = jobDao.findJobDetail(job.name); + assertEquals(job.name, j1.getName()); + assertEquals(job.name, j2.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJob() { + JobDetail job = insertJob(); + jobDao.getJobDetail(job.id); + jobDao.getJob(job.id); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobDetail() { + JobDetail src = insertJob(); + JobDetail job = jobDao.getJobDetail(src.id); + assertEquals(job.id, src.id); + assertEquals(job.name, src.name); + assertEquals(job.showId, src.showId); + assertEquals(job.facilityId, src.facilityId); + assertEquals(job.groupId, src.groupId); + assertEquals(job.deptId, src.deptId); + assertEquals(job.state, src.state); + assertEquals(job.shot, src.shot); + assertEquals(job.user, src.user); + assertEquals(job.email, src.email); + assertEquals(job.uid, src.uid); + assertEquals(job.logDir, src.logDir); + assertEquals(job.isPaused, src.isPaused); + assertEquals(job.isAutoEat, src.isAutoEat); + assertEquals(job.totalFrames, src.totalFrames); + assertEquals(job.totalLayers, src.totalLayers); + assertEquals(job.startTime, src.startTime); + assertEquals(job.stopTime, src.stopTime); + assertEquals(job.maxRetries, src.maxRetries); + assertEquals(job.os, src.os); + assertEquals(job.facilityName, src.facilityName); + assertEquals(job.deptName, src.deptName); + assertEquals(job.showName, src.showName); + assertEquals(job.priority, src.priority); + assertEquals(job.minCoreUnits, src.minCoreUnits); + assertEquals(job.maxCoreUnits, src.maxCoreUnits); + assertEquals(job.isLocal, src.isLocal); + assertEquals(job.localHostName, src.localHostName); + assertEquals(job.localMaxCores, src.localMaxCores); + assertEquals(job.localMaxMemory, src.localMaxMemory); + assertEquals(job.localThreadNumber, src.localThreadNumber); + assertEquals(job.localMaxGpus, src.localMaxGpus); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobsByTask() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + taskDao.insertTask(t); + jobDao.getJobs(t); + } + + @Test + @Transactional + @Rollback(true) + public void testJobExists() { + assertFalse(jobDao.exists(JOB_NAME)); + JobDetail job = insertJob(); + jdbcTemplate.update("UPDATE job SET str_state='PENDING' WHERE pk_job=?", job.id); + assertTrue(jobDao.exists(JOB_NAME)); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteJob() { + jobDao.deleteJob(insertJob()); + } + + @Test + @Transactional + @Rollback(true) + public void testActivateJob() { + jobDao.activateJob(insertJob(), JobState.PENDING); + } + + @Test + @Transactional + @Rollback(true) + public void 
testUpdateJobState() { + JobDetail job = insertJob(); + assertEquals(JobState.PENDING, job.state); + jobDao.updateState(job, JobState.FINISHED); + assertEquals(JobState.FINISHED.toString(), jdbcTemplate.queryForObject( + "SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobFinished() { + jobDao.updateJobFinished(insertJob()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobOverMinProc() { + JobDetail job = insertJob(); + assertFalse(jobDao.isOverMinCores(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testHasPendingFrames() { + assertFalse(jobDao.hasPendingFrames(insertJob())); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobOverMaxProc() { + JobDetail job = insertJob(); + assertFalse(jobDao.isOverMaxCores(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsJobAtMaxCores() { + JobDetail job = insertJob(); + assertFalse(jobDao.isAtMaxCores(job)); + + jdbcTemplate.update("UPDATE job_resource SET int_cores = int_max_cores WHERE pk_job=?", + job.getJobId()); + + assertTrue(jobDao.isAtMaxCores(job)); + + } + + @Test + @Transactional + @Rollback(true) + public void testIsOverMaxCores() { + JobDetail job = insertJob(); + jobDao.updateMaxCores(job, 500); + jdbcTemplate.update("UPDATE job_resource SET int_cores = 450 WHERE pk_job=?", + job.getJobId()); + + assertFalse(jobDao.isOverMaxCores(job)); + assertFalse(jobDao.isOverMaxCores(job, 50)); + assertTrue(jobDao.isOverMaxCores(job, 100)); + + jdbcTemplate.update("UPDATE job_resource SET int_max_cores = 200 WHERE pk_job=?", + job.getJobId()); + assertTrue(jobDao.isOverMaxCores(job)); + } + + @Test(expected = org.springframework.jdbc.UncategorizedSQLException.class) + @Transactional + @Rollback(true) + public void testMaxCoreTrigger() { + JobDetail job = insertJob(); + int maxCores = + jdbcTemplate.queryForObject("SELECT int_max_cores FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId()); + + jdbcTemplate.update("UPDATE job_resource SET int_cores = ? 
WHERE pk_job=?", maxCores + 1, + job.getJobId()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobPriority() { + JobDetail job = insertJob(); + jobDao.updatePriority(job, 199); + assertEquals(Integer.valueOf(199), + jdbcTemplate.queryForObject("SELECT int_priority FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMinCores() { + JobDetail job = insertJob(); + jobDao.updateMinCores(job, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxCores() { + JobDetail job = insertJob(); + jobDao.updateMaxCores(job, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_max_cores FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMinCoresByGroup() { + JobDetail job = insertJob(); + GroupInterface g = groupDao.getGroup(job.groupId); + jobDao.updateMinCores(g, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxCoresByGroup() { + JobDetail job = insertJob(); + GroupInterface g = groupDao.getGroup(job.groupId); + jobDao.updateMaxCores(g, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_max_cores FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobPriorityByGroup() { + JobDetail job = insertJob(); + GroupInterface g = groupDao.getGroup(job.groupId); + jobDao.updatePriority(g, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_priority FROM job_resource WHERE pk_job=?", + Integer.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxRss() { + long maxRss = 100000; + JobDetail job = insertJob(); + jobDao.updateMaxRSS(job, maxRss); + assertEquals(Long.valueOf(maxRss), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM job_mem WHERE pk_job=?", Long.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobPaused() { + JobDetail job = insertJob(); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_paused FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + + jobDao.updatePaused(job, false); + + assertFalse(jdbcTemplate.queryForObject("SELECT b_paused FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobAutoEat() { + JobDetail job = insertJob(); + + assertFalse(jdbcTemplate.queryForObject("SELECT b_autoeat FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + + jobDao.updateAutoEat(job, true); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_autoeat FROM job WHERE pk_job=?", + Boolean.class, job.getJobId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobMaxRetries() { + JobDetail job = insertJob(); + jobDao.updateMaxFrameRetries(job, 10); + assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( + "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, job.getJobId())); + } + + 
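// The two tests below exercise argument validation in updateMaxFrameRetries: a negative retry + // count and an excessively large one (100000) are both expected to throw IllegalArgumentException. + 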
@Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testUpdateJobMaxRetriesTooLow() { + JobDetail job = insertJob(); + jobDao.updateMaxFrameRetries(job, -1); + } + + @Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testUpdateJobMaxRetriesTooHigh() { + JobDetail job = insertJob(); + jobDao.updateMaxFrameRetries(job, 100000); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameStateTotals() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + jobDao.getFrameStateTotals(spec.getJobs().get(0).detail); + } + + @Test + @Transactional + @Rollback(true) + public void testGetExecutionSummary() { + JobDetail job = launchJob(); + ExecutionSummary summary = jobDao.getExecutionSummary(job); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobEnvironment() { + JobDetail job = launchJob(); + Map<String, String> map = jobDao.getEnvironment(job); + for (Map.Entry<String, String> e : map.entrySet()) { + assertEquals("VNP_VCR_SESSION", e.getKey()); + assertEquals("9000", e.getValue()); + } + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobEnvironment() { + JobDetail job = launchJob(); + jobDao.insertEnvironment(job, "CHAMBERS", "123"); + Map<String, String> map = jobDao.getEnvironment(job); + assertEquals(2, map.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertJobEnvironmentMap() { + JobDetail job = launchJob(); + Map<String, String> map = new HashMap<String, String>(); + map.put("CHAMBERS", "123"); + map.put("OVER9000", "123"); + + jobDao.insertEnvironment(job, map); + Map<String, String> env = jobDao.getEnvironment(job); + assertEquals(3, env.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLastJob() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + + JobInterface job = spec.getJobs().get(0).detail; + jobDao.getFrameStateTotals(job); + jobManager.shutdownJob(job); + // this might fail + JobDetail oldJob = jobDao.findLastJob(job.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobLogPath() { + JobDetail job = launchJob(); + String newLogDir = "/path/to/nowhere"; + jobDao.updateLogPath(job, newLogDir); + assertEquals(newLogDir, jdbcTemplate.queryForObject( + "SELECT str_log_dir FROM job WHERE pk_job=?", String.class, job.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateJobParent() { + JobDetail job = launchJob(); + + // Make a new test group. 
+ GroupDetail root = groupDao.getRootGroupDetail(job); + + GroupDetail testGroup = new GroupDetail(); + testGroup.name = "testGroup"; + testGroup.deptId = departmentDao.getDefaultDepartment().getId(); + testGroup.showId = root.getShowId(); + + groupDao.insertGroup(testGroup, root); + + jdbcTemplate.update( + "UPDATE folder SET int_job_max_cores=-1, int_job_min_cores=-1, int_job_priority=-1 WHERE pk_folder=?", + testGroup.getId()); + + GroupDetail group = groupDao.getGroupDetail(testGroup.getId()); + jobDao.updateParent(job, group); + + assertEquals(-1, group.jobMaxCores); + assertEquals(-1, group.jobMinCores); + assertEquals(-1, group.jobPriority); + + assertEquals(group.getGroupId(), jdbcTemplate + .queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", String.class, job.id)); + + assertEquals(group.getDepartmentId(), jdbcTemplate + .queryForObject("SELECT pk_dept FROM job WHERE pk_job=?", String.class, job.id)); + + group.jobMaxCores = 100; + group.jobMinCores = 100; + group.jobPriority = 100; + + jobDao.updateParent(job, group); + + assertEquals(Integer.valueOf(group.jobMaxCores), jdbcTemplate.queryForObject( + "SELECT int_max_cores FROM job_resource WHERE pk_job=?", Integer.class, job.id)); + + assertEquals(Integer.valueOf(group.jobMinCores), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM job_resource WHERE pk_job=?", Integer.class, job.id)); + + assertEquals(Integer.valueOf(group.jobPriority), jdbcTemplate.queryForObject( + "SELECT int_priority FROM job_resource WHERE pk_job=?", Integer.class, job.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testCueHasPendingJobs() { + jobDao.cueHasPendingJobs(new FacilityEntity("0")); + + } + + @Test + @Transactional + @Rollback(true) + public void mapPostJob() { + JobSpec spec = jobLauncher + .parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); + jobLauncher.launch(spec); + + final String pk_job = spec.getJobs().get(0).detail.id; + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_post WHERE pk_job=?", Integer.class, pk_job)); + } + + @Test + @Transactional + @Rollback(true) + public void activatePostJob() { + JobSpec spec = jobLauncher + .parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); + jobLauncher.launch(spec); + + jobDao.activatePostJob(spec.getJobs().get(0).detail); + + assertEquals(JobState.PENDING.toString(), + jdbcTemplate.queryForObject("SELECT str_state FROM job WHERE pk_job=?", + String.class, spec.getJobs().get(0).getPostJob().detail.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateUsage() { + + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + + JobInterface job = jobDao.findJob(spec.getJobs().get(0).detail.name); + + /** 60 seconds of 100 core units **/ + ResourceUsage usage = new ResourceUsage(60, 33, 0); + + assertTrue(usage.getClockTimeSeconds() > 0); + assertTrue(usage.getCoreTimeSeconds() > 0); + + /** + * Successful frame + */ + jobDao.updateUsage(job, usage, 0); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_success FROM job_usage WHERE pk_job=?", Long.class, + job.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_core_time_success FROM job_usage WHERE pk_job=?", Long.class, + job.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT 
int_frame_success_count FROM job_usage WHERE pk_job=?", + Integer.class, job.getId())); + + /** + * Failed frame + */ + jobDao.updateUsage(job, usage, 1); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_fail FROM job_usage WHERE pk_job=?", Long.class, + job.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_core_time_fail FROM job_usage WHERE pk_job=?", Long.class, + job.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM job_usage WHERE pk_job=?", Integer.class, + job.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testOverrideMaxCoresAndGpus() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/override_max_cores_gpus.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_test"); + assertEquals(job.maxCoreUnits, 42000); + assertEquals(job.maxGpuUnits, 42); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java index 32361cb26..834ef3539 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java @@ -70,663 +70,677 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class LayerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - JobDao jobDao; - - @Resource - LayerDao layerDao; - - @Resource - LimitDao limitDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - DepartmentDao departmentDao; - - @Resource - FacilityDao facilityDao; - - @Resource - JobLogUtil jobLogUtil; - - private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; - private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; - private static String LAYER_NAME = "pass_1"; - private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; - private static String LIMIT_NAME = "test-limit"; - private static String LIMIT_TEST_A = "testlimita"; - private static String LIMIT_TEST_B = "testlimitb"; - private static String LIMIT_TEST_C = "testlimitc"; - private static int LIMIT_MAX_VALUE = 32; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public LayerDetail getLayer() { - List layers = getLayers(); - return layers.get(layers.size() - 1); - } - - public List getLayers() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = spec.getJobs().get(0).detail; - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - jobDao.insertJob(job, jobLogUtil); - - List result = new ArrayList<>(); - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - limitDao.createLimit(LIMIT_TEST_A, 1); - limitDao.createLimit(LIMIT_TEST_B, 2); - limitDao.createLimit(LIMIT_TEST_C, 3); - - for (BuildableLayer buildableLayer : spec.getJobs().get(0).getBuildableLayers()) { - LayerDetail layer = buildableLayer.layerDetail; - FrameSet frameSet = new FrameSet(layer.range); - int 
num_frames = frameSet.size(); - int chunk_size = layer.chunkSize; - - layer.jobId = job.id; - layer.showId = ROOT_SHOW; - layer.totalFrameCount = num_frames / chunk_size; - if (num_frames % chunk_size > 0) { - layer.totalFrameCount++; - } - - layerDao.insertLayerDetail(layer); - layerDao.insertLayerEnvironment(layer, buildableLayer.env); - layerDao.addLimit(layer, limitId); - result.add(layer); - } - - return result; - } - - public JobDetail getJob() { - return jobDao.findJobDetail(JOB_NAME); - } - - public String getTestLimitId(String name) { - return limitDao.findLimit(name).getLimitId(); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerComplete() { - layerDao.isLayerComplete(getLayer()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerDispatchable() { - layerDao.isLayerDispatchable(getLayer()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerDetail() { - LayerDetail layer = getLayer(); - assertEquals(LAYER_NAME, layer.name); - assertEquals(layer.chunkSize, 1); - assertEquals(layer.dispatchOrder, 2); - assertNotNull(layer.id); - assertNotNull(layer.jobId); - assertEquals(layer.showId, ROOT_SHOW); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerDetail() { - LayerDetail layer = getLayer(); - assertEquals(LAYER_NAME, layer.name); - assertEquals(layer.chunkSize, 1); - assertEquals(layer.dispatchOrder, 2); - assertNotNull(layer.id); - assertNotNull(layer.jobId); - assertEquals(layer.showId, ROOT_SHOW); - - LayerDetail l2 = layerDao.getLayerDetail(layer); - LayerDetail l3 = layerDao.getLayerDetail(layer.id); - assertEquals(layer, l2); - assertEquals(layer, l3); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerDetails() { - List wantLayers = getLayers(); - List gotLayers = layerDao.getLayerDetails(getJob()); - assertThat(gotLayers, containsInAnyOrder(wantLayers.toArray())); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayerDetail() { - LayerDetail layer = getLayer(); - layerDao.findLayer(getJob(), "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayer() { - LayerDetail layer = getLayer(); - layerDao.getLayer(layer.id); - layerDao.getLayerDetail(layer); - layerDao.getLayerDetail(layer.id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayer() { - LayerDetail layer = getLayer(); - layerDao.findLayer(getJob(), "pass_1"); - layerDao.findLayerDetail(getJob(), "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMinCores() { - LayerDetail layer = getLayer(); - layerDao.updateLayerMinCores(layer, 200); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l2.minimumCores, 200); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerThreadable() { - LayerDetail layer = getLayer(); - layerDao.updateThreadable(layer, false); - assertFalse(jdbcTemplate.queryForObject("SELECT b_threadable FROM layer WHERE pk_layer=?", - Boolean.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMinMemory() { - LayerDetail layer = getLayer(); - - /* - * Check to ensure going below Dispatcher.MEM_RESERVED_MIN is not allowed. 
- */ - layerDao.updateLayerMinMemory(layer, 8096); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - // Hardcoded value of dispatcher.memory.mem_reserved_min - // to avoid having to read opencue.properties on a test setting - assertEquals(l2.minimumMemory, 262144); - - /* - * Check regular operation. - */ - layerDao.updateLayerMinMemory(layer, CueUtil.GB); - LayerDetail l3 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l3.minimumMemory, CueUtil.GB); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerTags() { - LayerDetail layer = getLayer(); - - HashSet tags = new HashSet(); - tags.add("frickjack"); - tags.add("pancake"); - - layerDao.updateLayerTags(layer, tags); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(StringUtils.join(l2.tags, " | "), "frickjack | pancake"); - - tags.clear(); - tags.add("frickjack"); - - layerDao.updateLayerTags(layer, tags); - l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(StringUtils.join(l2.tags, " | "), "frickjack"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameStateTotals() { - LayerDetail layer = getLayer(); - layerDao.getFrameStateTotals(layer); - jobDao.getFrameStateTotals(layer); - } - - @Test - @Transactional - @Rollback(true) - public void testGetExecutionSummary() { - LayerDetail layer = getLayer(); - layerDao.getExecutionSummary(layer); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerEnvironment() { - LayerDetail layer = getLayer(); - Map map = layerDao.getLayerEnvironment(layer); - for (Map.Entry e : map.entrySet()) { - - } - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerEnvironment() { - LayerDetail layer = getLayer(); - layerDao.insertLayerEnvironment(layer, "CHAMBERS", "123"); - Map env = layerDao.getLayerEnvironment(layer); - assertEquals(2, env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerEnvironmentMap() { - LayerDetail layer = getLayer(); - Map map = new HashMap(); - map.put("CHAMBERS", "123"); - map.put("OVER9000", "123"); - - layerDao.insertLayerEnvironment(layer, map); - Map env = layerDao.getLayerEnvironment(layer); - assertEquals(3, env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastSameNameMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastTimeStampMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1_2011_05_03_16_03"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastNewVersionMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastNewVersionTimeStampMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), 
JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2_2011_05_03_16_03"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindPastNewVersionFailMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_vfail_v2"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMaxRSS() { - LayerDetail layer = getLayer(); - - layerDao.updateLayerMaxRSS(layer, 1000, true); - assertEquals(Long.valueOf(1000), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); - - layerDao.updateLayerMaxRSS(layer, 999, true); - assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); - - layerDao.updateLayerMaxRSS(layer, 900, false); - assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateTags() { - String tag = "dillweed"; - LayerDetail layer = getLayer(); - layerDao.updateTags(layer, tag, LayerType.RENDER); - assertEquals(tag, jdbcTemplate.queryForObject("SELECT str_tags FROM layer WHERE pk_layer=?", - String.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinMemory() { - long mem = CueUtil.GB; - LayerDetail layer = getLayer(); - layerDao.updateMinMemory(layer, mem, LayerType.RENDER); - assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( - "SELECT int_mem_min FROM layer WHERE pk_layer=?", Long.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinGpuMemory() { - long mem = CueUtil.GB; - LayerDetail layer = getLayer(); - layerDao.updateMinGpuMemory(layer, mem, LayerType.RENDER); - assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_min FROM layer WHERE pk_layer=?", Long.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinCores() { - int cores = CueUtil.ONE_CORE * 2; - LayerDetail layer = getLayer(); - layerDao.updateMinCores(layer, cores, LayerType.RENDER); - assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( - "SELECT int_cores_min FROM layer WHERE pk_layer=?", Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMaxCores() { - int cores = CueUtil.ONE_CORE * 2; - LayerDetail layer = getLayer(); - layerDao.updateLayerMaxCores(layer, cores); - assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM layer WHERE pk_layer=?", Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void isOptimizable() { - LayerDetail layer = getLayer(); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * The succeeded count is good but the frames are too long Assert False - */ - jdbcTemplate.update("UPDATE layer_stat SET int_succeeded_count = 5 WHERE pk_layer=?", - layer.getLayerId()); - - 
jdbcTemplate.update( - "UPDATE layer_usage SET int_core_time_success = 3600 * 6 " + "WHERE pk_layer=?", - layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Set the frame times lower, so now we meet the criteria Assert True - */ - jdbcTemplate.update( - "UPDATE layer_usage SET int_core_time_success = 3500 * 5 " + "WHERE pk_layer=?", - layer.getLayerId()); - - assertTrue(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Take the general tag away. If a layer is not a general layer it cannot be optmiized. Assert - * False - */ - jdbcTemplate.update("UPDATE layer SET str_tags=? WHERE pk_layer=?", "desktop", - layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Layers that are already tagged util should return false as well. - * - * Assert False - */ - jdbcTemplate.update("UPDATE layer SET str_tags=? WHERE pk_layer=?", "general | util", - layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateUsage() { - LayerDetail layer = getLayer(); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", Integer.class, - layer.getId())); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", Integer.class, - layer.getId())); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", Integer.class, - layer.getId())); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33, 0); - - assertTrue(usage.getClockTimeSeconds() > 0); - assertTrue(usage.getCoreTimeSeconds() > 0); - - /** - * Successful frame - */ - layerDao.updateUsage(layer, usage, 0); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), - jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", Long.class, - layer.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), - jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", Long.class, - layer.getId())); - - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", Integer.class, - layer.getId())); - - /** - * Failed frame - */ - layerDao.updateUsage(layer, usage, 1); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", Long.class, layer.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", Long.class, layer.getId())); - - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject("SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - } - - 
@Test - @Transactional - @Rollback(true) - public void isLayerThreadable() { - LayerDetail layer = getLayer(); - jdbcTemplate.update("UPDATE layer set b_threadable = false WHERE pk_layer = ?", layer.getId()); - - assertFalse(layerDao.isThreadable(layer)); - - jdbcTemplate.update("UPDATE layer set b_threadable = true WHERE pk_layer = ?", layer.getId()); - - assertTrue(layerDao.isThreadable(layer)); - } - - @Test - @Transactional - @Rollback(true) - public void enableMemoryOptimizer() { - LayerDetail layer = getLayer(); - layerDao.enableMemoryOptimizer(layer, false); - assertFalse(jdbcTemplate.queryForObject("SELECT b_optimize FROM layer WHERE pk_layer=?", - Boolean.class, layer.getLayerId())); - - layerDao.enableMemoryOptimizer(layer, true); - assertTrue(jdbcTemplate.queryForObject("SELECT b_optimize FROM layer WHERE pk_layer=?", - Boolean.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void testBalanceMemory() { - LayerDetail layer = getLayer(); - assertTrue(layerDao.balanceLayerMinMemory(layer, CueUtil.GB)); - jdbcTemplate.update("UPDATE layer_mem SET int_max_rss=? WHERE pk_layer=?", CueUtil.GB8, - layer.getId()); - assertFalse(layerDao.balanceLayerMinMemory(layer, CueUtil.MB512)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOutput() { - LayerDetail layer = getLayer(); - layerDao.insertLayerOutput(layer, "filespec1"); - layerDao.insertLayerOutput(layer, "filespec2"); - layerDao.insertLayerOutput(layer, "filespec3"); - assertEquals(3, layerDao.getLayerOutputs(layer).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimits() { - LayerDetail layer = getLayer(); - List limits = layerDao.getLimits(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimitNames() { - LayerDetail layer = getLayer(); - List limits = layerDao.getLimitNames(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0), LIMIT_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void testAddLimit() { - LayerDetail layer = getLayer(); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_B)); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_C)); - LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); - List limits = layerDao.getLimits(layerResult); - assertEquals(limits.size(), 4); - List sourceIds = Arrays.asList(getTestLimitId(LIMIT_NAME), getTestLimitId(LIMIT_TEST_A), - getTestLimitId(LIMIT_TEST_B), getTestLimitId(LIMIT_TEST_C)); - List resultIds = - Arrays.asList(limits.get(0).id, limits.get(1).id, limits.get(2).id, limits.get(3).id); - Collections.sort(sourceIds); - Collections.sort(resultIds); - assertEquals(sourceIds, resultIds); - } - - @Test - @Transactional - @Rollback(true) - public void testDropLimit() { - LayerDetail layer = getLayer(); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); - layerDao.dropLimit(layer, getTestLimitId(LIMIT_NAME)); - LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); - List limits = layerDao.getLimits(layerResult); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_TEST_A)); - layerDao.dropLimit(layer, getTestLimitId(LIMIT_TEST_A)); - LayerInterface layerResultB = layerDao.getLayer(layer.getLayerId()); - List limitsB = layerDao.getLimits(layerResultB); - assertEquals(limitsB.size(), 0); - } + @Autowired + 
@Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + JobDao jobDao; + + @Resource + LayerDao layerDao; + + @Resource + LimitDao limitDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + DepartmentDao departmentDao; + + @Resource + FacilityDao facilityDao; + + @Resource + JobLogUtil jobLogUtil; + + private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; + private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; + private static String LAYER_NAME = "pass_1"; + private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; + private static String LIMIT_NAME = "test-limit"; + private static String LIMIT_TEST_A = "testlimita"; + private static String LIMIT_TEST_B = "testlimitb"; + private static String LIMIT_TEST_C = "testlimitc"; + private static int LIMIT_MAX_VALUE = 32; + + @Before + public void testMode() { + jobLauncher.testMode = true; + } + + public LayerDetail getLayer() { + List layers = getLayers(); + return layers.get(layers.size() - 1); + } + + public List getLayers() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = spec.getJobs().get(0).detail; + job.groupId = ROOT_FOLDER; + job.showId = ROOT_SHOW; + job.logDir = jobLogUtil.getJobLogPath(job); + job.deptId = departmentDao.getDefaultDepartment().getId(); + job.facilityId = facilityDao.getDefaultFacility().getId(); + jobDao.insertJob(job, jobLogUtil); + + List result = new ArrayList<>(); + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + limitDao.createLimit(LIMIT_TEST_A, 1); + limitDao.createLimit(LIMIT_TEST_B, 2); + limitDao.createLimit(LIMIT_TEST_C, 3); + + for (BuildableLayer buildableLayer : spec.getJobs().get(0).getBuildableLayers()) { + LayerDetail layer = buildableLayer.layerDetail; + FrameSet frameSet = new FrameSet(layer.range); + int num_frames = frameSet.size(); + int chunk_size = layer.chunkSize; + + layer.jobId = job.id; + layer.showId = ROOT_SHOW; + layer.totalFrameCount = num_frames / chunk_size; + if (num_frames % chunk_size > 0) { + layer.totalFrameCount++; + } + + layerDao.insertLayerDetail(layer); + layerDao.insertLayerEnvironment(layer, buildableLayer.env); + layerDao.addLimit(layer, limitId); + result.add(layer); + } + + return result; + } + + public JobDetail getJob() { + return jobDao.findJobDetail(JOB_NAME); + } + + public String getTestLimitId(String name) { + return limitDao.findLimit(name).getLimitId(); + } + + @Test + @Transactional + @Rollback(true) + public void testIsLayerComplete() { + layerDao.isLayerComplete(getLayer()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsLayerDispatchable() { + layerDao.isLayerDispatchable(getLayer()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerDetail() { + LayerDetail layer = getLayer(); + assertEquals(LAYER_NAME, layer.name); + assertEquals(layer.chunkSize, 1); + assertEquals(layer.dispatchOrder, 2); + assertNotNull(layer.id); + assertNotNull(layer.jobId); + assertEquals(layer.showId, ROOT_SHOW); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerDetail() { + LayerDetail layer = getLayer(); + assertEquals(LAYER_NAME, layer.name); + assertEquals(layer.chunkSize, 1); + assertEquals(layer.dispatchOrder, 2); + assertNotNull(layer.id); + assertNotNull(layer.jobId); + assertEquals(layer.showId, ROOT_SHOW); + + LayerDetail l2 = layerDao.getLayerDetail(layer); + LayerDetail l3 = 
layerDao.getLayerDetail(layer.id); + assertEquals(layer, l2); + assertEquals(layer, l3); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerDetails() { + List wantLayers = getLayers(); + List gotLayers = layerDao.getLayerDetails(getJob()); + assertThat(gotLayers, containsInAnyOrder(wantLayers.toArray())); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLayerDetail() { + LayerDetail layer = getLayer(); + layerDao.findLayer(getJob(), "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayer() { + LayerDetail layer = getLayer(); + layerDao.getLayer(layer.id); + layerDao.getLayerDetail(layer); + layerDao.getLayerDetail(layer.id); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLayer() { + LayerDetail layer = getLayer(); + layerDao.findLayer(getJob(), "pass_1"); + layerDao.findLayerDetail(getJob(), "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerMinCores() { + LayerDetail layer = getLayer(); + layerDao.updateLayerMinCores(layer, 200); + LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(l2.minimumCores, 200); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerThreadable() { + LayerDetail layer = getLayer(); + layerDao.updateThreadable(layer, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_threadable FROM layer WHERE pk_layer=?", + Boolean.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerMinMemory() { + LayerDetail layer = getLayer(); + + /* + * Check to ensure going below Dispatcher.MEM_RESERVED_MIN is not allowed. + */ + layerDao.updateLayerMinMemory(layer, 8096); + LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + // Hardcoded value of dispatcher.memory.mem_reserved_min + // to avoid having to read opencue.properties on a test setting + assertEquals(l2.minimumMemory, 262144); + + /* + * Check regular operation. 
+ */ + layerDao.updateLayerMinMemory(layer, CueUtil.GB); + LayerDetail l3 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(l3.minimumMemory, CueUtil.GB); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerTags() { + LayerDetail layer = getLayer(); + + HashSet<String> tags = new HashSet<String>(); + tags.add("frickjack"); + tags.add("pancake"); + + layerDao.updateLayerTags(layer, tags); + LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(StringUtils.join(l2.tags, " | "), "frickjack | pancake"); + + tags.clear(); + tags.add("frickjack"); + + layerDao.updateLayerTags(layer, tags); + l2 = layerDao.findLayerDetail(getJob(), "pass_1"); + assertEquals(StringUtils.join(l2.tags, " | "), "frickjack"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFrameStateTotals() { + LayerDetail layer = getLayer(); + layerDao.getFrameStateTotals(layer); + jobDao.getFrameStateTotals(layer); + } + + @Test + @Transactional + @Rollback(true) + public void testGetExecutionSummary() { + LayerDetail layer = getLayer(); + layerDao.getExecutionSummary(layer); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerEnvironment() { + LayerDetail layer = getLayer(); + Map<String, String> map = layerDao.getLayerEnvironment(layer); + for (Map.Entry<String, String> e : map.entrySet()) { + + } + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerEnvironment() { + LayerDetail layer = getLayer(); + layerDao.insertLayerEnvironment(layer, "CHAMBERS", "123"); + Map<String, String> env = layerDao.getLayerEnvironment(layer); + assertEquals(2, env.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerEnvironmentMap() { + LayerDetail layer = getLayer(); + Map<String, String> map = new HashMap<String, String>(); + map.put("CHAMBERS", "123"); + map.put("OVER9000", "123"); + + layerDao.insertLayerEnvironment(layer, map); + Map<String, String> env = layerDao.getLayerEnvironment(layer); + assertEquals(3, env.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastSameNameMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastTimeStampMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1_2011_05_03_16_03"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastNewVersionMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindPastNewVersionTimeStampMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2_2011_05_03_16_03"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test(expected = org.springframework.dao.EmptyResultDataAccessException.class) + 
@Transactional + @Rollback(true) + public void testFindPastNewVersionFailMaxRSS() { + getLayer(); + jobDao.updateState(getJob(), JobState.FINISHED); + assertEquals(JobState.FINISHED, getJob().state); + + JobDetail lastJob = null; + lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_vfail_v2"); + long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateLayerMaxRSS() { + LayerDetail layer = getLayer(); + + layerDao.updateLayerMaxRSS(layer, 1000, true); + assertEquals(Long.valueOf(1000), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); + + layerDao.updateLayerMaxRSS(layer, 999, true); + assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); + + layerDao.updateLayerMaxRSS(layer, 900, false); + assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( + "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", Long.class, layer.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateTags() { + String tag = "dillweed"; + LayerDetail layer = getLayer(); + layerDao.updateTags(layer, tag, LayerType.RENDER); + assertEquals(tag, jdbcTemplate.queryForObject("SELECT str_tags FROM layer WHERE pk_layer=?", + String.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMinMemory() { + long mem = CueUtil.GB; + LayerDetail layer = getLayer(); + layerDao.updateMinMemory(layer, mem, LayerType.RENDER); + assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( + "SELECT int_mem_min FROM layer WHERE pk_layer=?", Long.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMinGpuMemory() { + long mem = CueUtil.GB; + LayerDetail layer = getLayer(); + layerDao.updateMinGpuMemory(layer, mem, LayerType.RENDER); + assertEquals(Long.valueOf(mem), + jdbcTemplate.queryForObject("SELECT int_gpu_mem_min FROM layer WHERE pk_layer=?", + Long.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMinCores() { + int cores = CueUtil.ONE_CORE * 2; + LayerDetail layer = getLayer(); + layerDao.updateMinCores(layer, cores, LayerType.RENDER); + assertEquals(Integer.valueOf(cores), + jdbcTemplate.queryForObject("SELECT int_cores_min FROM layer WHERE pk_layer=?", + Integer.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void updateMaxCores() { + int cores = CueUtil.ONE_CORE * 2; + LayerDetail layer = getLayer(); + layerDao.updateLayerMaxCores(layer, cores); + assertEquals(Integer.valueOf(cores), + jdbcTemplate.queryForObject("SELECT int_cores_max FROM layer WHERE pk_layer=?", + Integer.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void isOptimizable() { + LayerDetail layer = getLayer(); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * The succeeded count is good but the frames are too long Assert False + */ + jdbcTemplate.update("UPDATE layer_stat SET int_succeeded_count = 5 WHERE pk_layer=?", + layer.getLayerId()); + + jdbcTemplate.update( + "UPDATE layer_usage SET int_core_time_success = 3600 * 6 " + "WHERE pk_layer=?", + layer.getLayerId()); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * Set the frame times lower, so now we meet the criteria Assert True + */ + jdbcTemplate.update( + "UPDATE layer_usage SET int_core_time_success 
= 3500 * 5 " + "WHERE pk_layer=?", + layer.getLayerId()); + + assertTrue(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * Take the general tag away. If a layer is not a general layer it cannot be optmiized. + * Assert False + */ + jdbcTemplate.update("UPDATE layer SET str_tags=? WHERE pk_layer=?", "desktop", + layer.getLayerId()); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + + /* + * Layers that are already tagged util should return false as well. + * + * Assert False + */ + jdbcTemplate.update("UPDATE layer SET str_tags=? WHERE pk_layer=?", "general | util", + layer.getLayerId()); + + assertFalse(layerDao.isOptimizable(layer, 5, 3600)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateUsage() { + LayerDetail layer = getLayer(); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + /** 60 seconds of 100 core units **/ + ResourceUsage usage = new ResourceUsage(60, 33, 0); + + assertTrue(usage.getClockTimeSeconds() > 0); + assertTrue(usage.getCoreTimeSeconds() > 0); + + /** + * Successful frame + */ + layerDao.updateUsage(layer, usage, 0); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", + Long.class, layer.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", + Long.class, layer.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + + /** + * Failed frame + */ + layerDao.updateUsage(layer, usage, 1); + assertEquals(Long.valueOf(usage.getClockTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", Long.class, + layer.getId())); + + assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), + jdbcTemplate.queryForObject( + "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", Long.class, + layer.getId())); + + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", + Integer.class, layer.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void isLayerThreadable() { + LayerDetail layer = getLayer(); + jdbcTemplate.update("UPDATE layer set b_threadable = false WHERE pk_layer = ?", + layer.getId()); + + assertFalse(layerDao.isThreadable(layer)); + + jdbcTemplate.update("UPDATE layer set b_threadable = true 
WHERE pk_layer = ?", + layer.getId()); + + assertTrue(layerDao.isThreadable(layer)); + } + + @Test + @Transactional + @Rollback(true) + public void enableMemoryOptimizer() { + LayerDetail layer = getLayer(); + layerDao.enableMemoryOptimizer(layer, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_optimize FROM layer WHERE pk_layer=?", + Boolean.class, layer.getLayerId())); + + layerDao.enableMemoryOptimizer(layer, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_optimize FROM layer WHERE pk_layer=?", + Boolean.class, layer.getLayerId())); + } + + @Test + @Transactional + @Rollback(true) + public void testBalanceMemory() { + LayerDetail layer = getLayer(); + assertTrue(layerDao.balanceLayerMinMemory(layer, CueUtil.GB)); + jdbcTemplate.update("UPDATE layer_mem SET int_max_rss=? WHERE pk_layer=?", CueUtil.GB8, + layer.getId()); + assertFalse(layerDao.balanceLayerMinMemory(layer, CueUtil.MB512)); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertLayerOutput() { + LayerDetail layer = getLayer(); + layerDao.insertLayerOutput(layer, "filespec1"); + layerDao.insertLayerOutput(layer, "filespec2"); + layerDao.insertLayerOutput(layer, "filespec3"); + assertEquals(3, layerDao.getLayerOutputs(layer).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimits() { + LayerDetail layer = getLayer(); + List limits = layerDao.getLimits(layer); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimitNames() { + LayerDetail layer = getLayer(); + List limits = layerDao.getLimitNames(layer); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0), LIMIT_NAME); + } + + @Test + @Transactional + @Rollback(true) + public void testAddLimit() { + LayerDetail layer = getLayer(); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_B)); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_C)); + LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); + List limits = layerDao.getLimits(layerResult); + assertEquals(limits.size(), 4); + List sourceIds = + Arrays.asList(getTestLimitId(LIMIT_NAME), getTestLimitId(LIMIT_TEST_A), + getTestLimitId(LIMIT_TEST_B), getTestLimitId(LIMIT_TEST_C)); + List resultIds = Arrays.asList(limits.get(0).id, limits.get(1).id, limits.get(2).id, + limits.get(3).id); + Collections.sort(sourceIds); + Collections.sort(resultIds); + assertEquals(sourceIds, resultIds); + } + + @Test + @Transactional + @Rollback(true) + public void testDropLimit() { + LayerDetail layer = getLayer(); + layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); + layerDao.dropLimit(layer, getTestLimitId(LIMIT_NAME)); + LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); + List limits = layerDao.getLimits(layerResult); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0).id, getTestLimitId(LIMIT_TEST_A)); + layerDao.dropLimit(layer, getTestLimitId(LIMIT_TEST_A)); + LayerInterface layerResultB = layerDao.getLayer(layer.getLayerId()); + List limitsB = layerDao.getLimits(layerResultB); + assertEquals(limitsB.size(), 0); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java index 64fb456f7..920219145 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java +++ 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LimitDaoTests.java @@ -37,94 +37,98 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class LimitDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - LimitDao limitDao; - - private static String LIMIT_NAME = "test-limit"; - private static int LIMIT_MAX_VALUE = 32; - - @Test - @Transactional - @Rollback(true) - public void testCreateLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", Integer.class, limitId)); - - limitDao.deleteLimit(limit); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", Integer.class, limitId)); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - - LimitEntity limit = limitDao.findLimit(LIMIT_NAME); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - - LimitEntity limit = limitDao.getLimit(limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testSetLimitName() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - String newName = "heyIChanged"; - - limitDao.setLimitName(limit, newName); - - limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, newName); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testSetMaxValue() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - int newValue = 600; - - limitDao.setMaxValue(limit, newValue); - - limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, newValue); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + LimitDao limitDao; + + private static String LIMIT_NAME = "test-limit"; + private static int LIMIT_MAX_VALUE = 32; + + @Test + @Transactional + @Rollback(true) + public void testCreateLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + assertEquals(limit.id, limitId); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + 
+ assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", Integer.class, + limitId)); + + limitDao.deleteLimit(limit); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", Integer.class, + limitId)); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + + LimitEntity limit = limitDao.findLimit(LIMIT_NAME); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimit() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + + LimitEntity limit = limitDao.getLimit(limitId); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testSetLimitName() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + String newName = "heyIChanged"; + + limitDao.setLimitName(limit, newName); + + limit = limitDao.getLimit(limitId); + assertEquals(limit.id, limitId); + assertEquals(limit.name, newName); + assertEquals(limit.maxValue, LIMIT_MAX_VALUE); + } + + @Test + @Transactional + @Rollback(true) + public void testSetMaxValue() { + String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); + LimitEntity limit = limitDao.getLimit(limitId); + int newValue = 600; + + limitDao.setMaxValue(limit, newValue); + + limit = limitDao.getLimit(limitId); + assertEquals(limit.id, limitId); + assertEquals(limit.name, LIMIT_NAME); + assertEquals(limit.maxValue, newValue); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java index 2b96fa039..71353c121 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MaintenanceDaoTests.java @@ -38,33 +38,33 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class MaintenanceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - MaintenanceDao maintenanceDao; + @Resource + MaintenanceDao maintenanceDao; - @Test - @Transactional - @Rollback(true) - public void testSetUpHostsToDown() { - maintenanceDao.setUpHostsToDown(); - } + @Test + @Transactional + @Rollback(true) + public void testSetUpHostsToDown() { + maintenanceDao.setUpHostsToDown(); + } - @Test - @Transactional - @Rollback(true) - public void testLockHistoricalTask() { - assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - assertFalse(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - } + @Test + @Transactional + @Rollback(true) + public void testLockHistoricalTask() { + assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); + assertFalse(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); + } - @Test - @Transactional - @Rollback(true) - public void testUnlockHistoricalTask() { - 
assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); - } + @Test + @Transactional + @Rollback(true) + public void testUnlockHistoricalTask() { + assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); + maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java index 4cfbf8287..3b1688b44 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/MatcherDaoTests.java @@ -43,92 +43,92 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class MatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - MatcherDao matcherDao; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - GroupDao groupDao; - - private static String FILTER_NAME = "test_filter"; - - public ShowEntity getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public MatcherEntity createMatcher() { - FilterEntity filter = createFilter(); - MatcherEntity matcher = new MatcherEntity(); - matcher.filterId = filter.id; - matcher.name = null; - matcher.showId = getShow().getId(); - matcher.subject = MatchSubject.JOB_NAME; - matcher.type = MatchType.CONTAINS; - matcher.value = "testuser"; - matcherDao.insertMatcher(matcher); - return matcher; - } - - public FilterEntity createFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - filterDao.insertFilter(filter); - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testInsertMatcher() { - createMatcher(); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteMatcher() { - MatcherEntity matcher = createMatcher(); - matcherDao.deleteMatcher(matcher); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMatcher() { - MatcherEntity matcher = createMatcher(); - matcher.subject = MatchSubject.USER; - matcher.value = "testuser"; - matcher.type = MatchType.IS; - matcherDao.updateMatcher(matcher); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatcher() { - MatcherEntity matcher = createMatcher(); - matcherDao.getMatcher(matcher); - matcherDao.getMatcher(matcher.getMatcherId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatchers() { - MatcherEntity matcher = createMatcher(); - matcherDao.getMatchers(matcher); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + MatcherDao matcherDao; + + @Resource + FilterDao filterDao; + + @Resource + ShowDao showDao; + + @Resource + GroupDao groupDao; + + private static String FILTER_NAME = "test_filter"; + + public ShowEntity getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public MatcherEntity createMatcher() { + FilterEntity filter = createFilter(); + MatcherEntity matcher = new MatcherEntity(); + matcher.filterId = filter.id; + matcher.name = null; + matcher.showId = 
getShow().getId(); + matcher.subject = MatchSubject.JOB_NAME; + matcher.type = MatchType.CONTAINS; + matcher.value = "testuser"; + matcherDao.insertMatcher(matcher); + return matcher; + } + + public FilterEntity createFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = "00000000-0000-0000-0000-000000000000"; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + filterDao.insertFilter(filter); + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testInsertMatcher() { + createMatcher(); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteMatcher() { + MatcherEntity matcher = createMatcher(); + matcherDao.deleteMatcher(matcher); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMatcher() { + MatcherEntity matcher = createMatcher(); + matcher.subject = MatchSubject.USER; + matcher.value = "testuser"; + matcher.type = MatchType.IS; + matcherDao.updateMatcher(matcher); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatcher() { + MatcherEntity matcher = createMatcher(); + matcherDao.getMatcher(matcher); + matcherDao.getMatcher(matcher.getMatcherId()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatchers() { + MatcherEntity matcher = createMatcher(); + matcherDao.getMatchers(matcher); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java index 1e3ce4246..68b97b272 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/NestedWhiteboardDaoTests.java @@ -36,28 +36,28 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class NestedWhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - NestedWhiteboardDao nestedWhiteboardDao; - - @Resource - ShowDao showDao; - - public ShowEntity getShow() { - return showDao.findShowDetail("pipe"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetNestedJobWhiteboard() { - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + NestedWhiteboardDao nestedWhiteboardDao; + + @Resource + ShowDao showDao; + + public ShowEntity getShow() { + return showDao.findShowDetail("pipe"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetNestedJobWhiteboard() { + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + nestedWhiteboardDao.getJobWhiteboard(getShow()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java index 2b549d7eb..68c71dfbf 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java +++ 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/OwnerDaoTests.java @@ -40,84 +40,84 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class OwnerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - OwnerDao ownerDao; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Test - @Transactional - @Rollback(true) - public void testInsertOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - } - - @Test - @Transactional - @Rollback(true) - public void testIsOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - } - - @Test - @Transactional - @Rollback(true) - public void testGetOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - assertEquals(o, ownerDao.findOwner("spongebob")); - assertEquals(o, ownerDao.getOwner(o.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM owner WHERE pk_owner=?", Integer.class, o.id)); - - ownerDao.deleteOwner(o); - - assertEquals(Integer.valueOf(0), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM owner WHERE pk_owner=?", Integer.class, o.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShow() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - ShowInterface newShow = adminManager.findShowEntity("edu"); - - ownerDao.updateShow(o, newShow); - - assertEquals(newShow.getShowId(), jdbcTemplate - .queryForObject("SELECT pk_show FROM owner WHERE pk_owner=?", String.class, o.id)); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + OwnerDao ownerDao; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Test + @Transactional + @Rollback(true) + public void testInsertOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + } + + @Test + @Transactional + @Rollback(true) + public void testIsOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + } + + @Test + @Transactional + @Rollback(true) + public void testGetOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + + assertEquals(o, ownerDao.findOwner("spongebob")); + assertEquals(o, ownerDao.getOwner(o.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteOwner() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + + 
assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM owner WHERE pk_owner=?", Integer.class, o.id)); + + ownerDao.deleteOwner(o); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM owner WHERE pk_owner=?", Integer.class, o.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShow() { + ShowInterface show = adminManager.findShowEntity("pipe"); + OwnerEntity o = new OwnerEntity(); + o.name = "spongebob"; + ownerDao.insertOwner(o, show); + + ShowInterface newShow = adminManager.findShowEntity("edu"); + + ownerDao.updateShow(o, newShow); + + assertEquals(newShow.getShowId(), jdbcTemplate + .queryForObject("SELECT pk_show FROM owner WHERE pk_owner=?", String.class, o.id)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java index 378fc2d34..43aab42e8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/PointDaoTests.java @@ -47,114 +47,114 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class PointDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - DepartmentDao departmentDao; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DepartmentDao departmentDao; - @Resource - AdminManager adminManager; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - PointDao pointDao; - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void insertDepartmentConfig() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - DepartmentInterface dept = departmentDao.findDepartment("Lighting"); - PointInterface d = pointDao.insertPointConf(show, dept); - - assertEquals(show.id, jdbcTemplate.queryForObject("SELECT pk_show FROM point WHERE pk_point=?", - String.class, d.getPointId())); - - assertEquals(dept.getDepartmentId(), jdbcTemplate.queryForObject( - "SELECT pk_dept FROM point WHERE pk_point=?", String.class, d.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void departmentConfigExists() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - - assertTrue(pointDao.pointConfExists(show, departmentDao.getDefaultDepartment())); - - assertFalse(pointDao.pointConfExists(show, departmentDao.findDepartment("Lighting"))); - } - - @Test - @Transactional - @Rollback(true) - public void updateEnableTiManaged() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - - PointInterface config = - pointDao.getPointConfigDetail(show, departmentDao.getDefaultDepartment()); - - // pointDao.updateEnableManaged(config, "Lighting", 10); - } - - @Test - @Transactional - @Rollback(true) - public void getDepartmentConfig() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - - /* Tests both overlodaed methods */ - PointInterface configA = - 
pointDao.getPointConfigDetail(show, departmentDao.getDefaultDepartment()); - - PointInterface configB = pointDao.getPointConfDetail(configA.getPointId()); - - assertEquals(configA.getPointId(), configB.getPointId()); - assertEquals(configA.getDepartmentId(), configB.getDepartmentId()); - assertEquals(configA.getShowId(), configB.getShowId()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsOverMinCores() { - - JobDetail job = launchJob(); - - PointInterface pointConfig = - pointDao.getPointConfigDetail(job, departmentDao.getDepartment(job.getDepartmentId())); - - assertFalse(pointDao.isOverMinCores(job)); - - // Now update some values so it returns true. - jdbcTemplate.update("UPDATE point SET int_cores = int_min_cores + 2000000 WHERE pk_point=?", - pointConfig.getId()); + @Resource + AdminManager adminManager; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + PointDao pointDao; + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void insertDepartmentConfig() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + DepartmentInterface dept = departmentDao.findDepartment("Lighting"); + PointInterface d = pointDao.insertPointConf(show, dept); + + assertEquals(show.id, jdbcTemplate.queryForObject( + "SELECT pk_show FROM point WHERE pk_point=?", String.class, d.getPointId())); + + assertEquals(dept.getDepartmentId(), jdbcTemplate.queryForObject( + "SELECT pk_dept FROM point WHERE pk_point=?", String.class, d.getPointId())); + } + + @Test + @Transactional + @Rollback(true) + public void departmentConfigExists() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + + assertTrue(pointDao.pointConfExists(show, departmentDao.getDefaultDepartment())); + + assertFalse(pointDao.pointConfExists(show, departmentDao.findDepartment("Lighting"))); + } + + @Test + @Transactional + @Rollback(true) + public void updateEnableTiManaged() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + + PointInterface config = + pointDao.getPointConfigDetail(show, departmentDao.getDefaultDepartment()); + + // pointDao.updateEnableManaged(config, "Lighting", 10); + } + + @Test + @Transactional + @Rollback(true) + public void getDepartmentConfig() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + + /* Tests both overlodaed methods */ + PointInterface configA = + pointDao.getPointConfigDetail(show, departmentDao.getDefaultDepartment()); + + PointInterface configB = pointDao.getPointConfDetail(configA.getPointId()); + + assertEquals(configA.getPointId(), configB.getPointId()); + assertEquals(configA.getDepartmentId(), configB.getDepartmentId()); + assertEquals(configA.getShowId(), configB.getShowId()); + } + + @Test + @Transactional + @Rollback(true) + public void testIsOverMinCores() { + + JobDetail job = launchJob(); + + PointInterface pointConfig = pointDao.getPointConfigDetail(job, + departmentDao.getDepartment(job.getDepartmentId())); + + assertFalse(pointDao.isOverMinCores(job)); + + // Now update some values so it returns true. 
+ jdbcTemplate.update("UPDATE point SET int_cores = int_min_cores + 2000000 WHERE pk_point=?", + pointConfig.getId()); - logger.info(jdbcTemplate.queryForObject("SELECT int_min_cores from point where pk_point=?", - Integer.class, pointConfig.getId())); + logger.info(jdbcTemplate.queryForObject("SELECT int_min_cores from point where pk_point=?", + Integer.class, pointConfig.getId())); - assertTrue(pointDao.isOverMinCores(job)); - } + assertTrue(pointDao.isOverMinCores(job)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java index 402ed745b..b3e930d5a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java @@ -68,742 +68,743 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ProcDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Autowired - private Environment env; + @Autowired + private Environment env; - @Resource - ProcDao procDao; + @Resource + ProcDao procDao; - @Resource - HostDao hostDao; + @Resource + HostDao hostDao; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - @Resource - LayerDao layerDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - HostManager hostManager; + @Resource + LayerDao layerDao; + + @Resource + DispatcherDao dispatcherDao; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + Dispatcher dispatcher; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + ProcSearchFactory procSearchFactory; + + private static String PK_ALLOC = "00000000-0000-0000-0000-000000000000"; + + private long MEM_RESERVED_DEFAULT; + private long MEM_GPU_RESERVED_DEFAULT; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("beta").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB32).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(8).setCoresPerProc(100) + .setState(HardwareState.UP).setFacility("spi").build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return hostDao.findDispatchHost("beta"); + } + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Before + public void setDispatcherTestMode() { + dispatcher.setTestMode(true); + jobLauncher.testMode = true; + this.MEM_RESERVED_DEFAULT = + env.getRequiredProperty("dispatcher.memory.mem_reserved_default", Long.class); + this.MEM_GPU_RESERVED_DEFAULT = + env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_default", Long.class); + } + + @Test + @Transactional + @Rollback(true) + public void testDontVerifyRunningProc() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); + VirtualProc proc = VirtualProc.build(host, frame); + dispatcher.dispatch(frame, proc); + + // Confirm was have a running frame. + assertEquals("RUNNING", jdbcTemplate.queryForObject( + "SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.id)); + + assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); + jobManager.shutdownJob(job); + + int result = jdbcTemplate.update("UPDATE job SET ts_stopped = " + + "current_timestamp - interval '10' minute " + "WHERE pk_job=?", job.id); + + assertEquals(1, result); + assertFalse(procDao.verifyRunningProc(proc.getId(), frame.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertVirtualProc() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteVirtualProc() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + procDao.deleteVirtualProc(proc); + } + + @Test + @Transactional + @Rollback(true) + public void testClearVirtualProcAssignment() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; 
+ proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + procDao.clearVirtualProcAssignment(proc); + } + + @Test + @Transactional + @Rollback(true) + public void testClearVirtualProcAssignmentByFrame() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + assertTrue(procDao.clearVirtualProcAssignment(frame)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateVirtualProcAssignment() { + + DispatchHost host = createHost(); + + JobDetail job = launchJob(); + FrameDetail frame1 = frameDao.findFrameDetail(job, "0001-pass_1"); + FrameDetail frame2 = frameDao.findFrameDetail(job, "0002-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame1.id; + proc.layerId = frame1.layerId; + proc.showId = frame1.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame1.getId()); + + proc.frameId = frame2.id; + + procDao.updateVirtualProcAssignment(proc); + procDao.verifyRunningProc(proc.getId(), frame2.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateProcMemoryUsage() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + procDao.verifyRunningProc(proc.getId(), frame.getId()); + byte[] children = new byte[100]; + + procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000, 0, 0, 0, children); + + } + + @Test + @Transactional + @Rollback(true) + public void testGetVirtualProc() { + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); + VirtualProc proc = VirtualProc.build(host, frame); + dispatcher.dispatch(frame, proc); + + assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM proc WHERE pk_proc=?", Integer.class, proc.id)); + + VirtualProc verifyProc = procDao.getVirtualProc(proc.getId()); + assertEquals(host.allocationId, verifyProc.allocationId); + assertEquals(proc.coresReserved, verifyProc.coresReserved); + assertEquals(proc.frameId, verifyProc.frameId); + assertEquals(proc.hostId, verifyProc.hostId); + assertEquals(proc.id, verifyProc.id); + assertEquals(proc.jobId, verifyProc.jobId); + assertEquals(proc.layerId, verifyProc.layerId); + 
assertEquals(proc.showId, verifyProc.showId); + } + + @Test + @Transactional + @Rollback(true) + public void testFindVirtualProc() { + + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.findVirtualProc(frame); + } + + @Test + @Transactional + @Rollback(true) + public void testFindVirtualProcs() { + + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + assertEquals(1, procDao.findVirtualProcs(HardwareState.UP).size()); + assertEquals(1, procDao.findVirtualProcs(host).size()); + assertEquals(1, procDao.findVirtualProcs(job).size()); + assertEquals(1, procDao.findVirtualProcs(frame).size()); + assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create(job)).size()); + assertEquals(1, + procDao.findVirtualProcs(frameSearchFactory.create((LayerInterface) frame)).size()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindOrphanedVirtualProcs() { + DispatchHost host = createHost(); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + assertEquals(0, procDao.findOrphanedVirtualProcs().size()); + + /** + * This is destructive to running jobs + */ + jdbcTemplate.update("UPDATE proc SET ts_ping = (current_timestamp - interval '30' day)"); + + assertEquals(1, procDao.findOrphanedVirtualProcs().size()); + assertTrue(procDao.isOrphan(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testUnbookProc() { + + DispatchHost host = createHost(); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.unbookProc(proc); + assertTrue(jdbcTemplate.queryForObject("SELECT b_unbooked FROM proc WHERE pk_proc=?", + Boolean.class, proc.id)); + } + + @Test + @Transactional + 
@Rollback(true) + public void testUnbookVirtualProcs() { + + DispatchHost host = createHost(); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + List procs = new ArrayList(); + procs.add(proc); + + procDao.unbookVirtualProcs(procs); + + assertTrue(jdbcTemplate.queryForObject("SELECT b_unbooked FROM proc WHERE pk_proc=?", + Boolean.class, proc.id)); + } + + @Test(expected = ResourceReservationFailureException.class) + @Transactional + @Rollback(true) + public void testIncreaseReservedMemoryFail() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.increaseReservedMemory(proc, 8173264l * 8); + } + + @Test + @Transactional + @Rollback(true) + public void testIncreaseReservedMemory() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = PK_ALLOC; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + procDao.insertVirtualProc(proc); + + procDao.increaseReservedMemory(proc, 3145728); + } + + @Test + @Transactional + @Rollback(true) + public void testGetReservedMemory() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + VirtualProc _proc = procDao.findVirtualProc(frame); + assertEquals(Long.valueOf(this.MEM_RESERVED_DEFAULT), jdbcTemplate.queryForObject( + "SELECT int_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); + assertEquals(this.MEM_RESERVED_DEFAULT, procDao.getReservedMemory(_proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetReservedGpuMemory() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + VirtualProc _proc = procDao.findVirtualProc(frame); + assertEquals(Long.valueOf(this.MEM_GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); + assertEquals(this.MEM_GPU_RESERVED_DEFAULT, procDao.getReservedGpuMemory(_proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testBalanceUnderUtilizedProcs() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); 
+ + FrameDetail frameDetail1 = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame1 = frameDao.getDispatchFrame(frameDetail1.id); + + VirtualProc proc1 = VirtualProc.build(host, frame1); + proc1.frameId = frame1.id; + procDao.insertVirtualProc(proc1); + + byte[] children = new byte[100]; + procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000, 0, 0, 0, children); + layerDao.updateLayerMaxRSS(frame1, 250000, true); + + FrameDetail frameDetail2 = frameDao.findFrameDetail(job, "0002-pass_1"); + DispatchFrame frame2 = frameDao.getDispatchFrame(frameDetail2.id); + + VirtualProc proc2 = VirtualProc.build(host, frame2); + proc2.frameId = frame2.id; + procDao.insertVirtualProc(proc2); + + procDao.updateProcMemoryUsage(frame2, 255000, 255000, 255000, 255000, 0, 0, 0, children); + layerDao.updateLayerMaxRSS(frame2, 255000, true); + + FrameDetail frameDetail3 = frameDao.findFrameDetail(job, "0003-pass_1"); + DispatchFrame frame3 = frameDao.getDispatchFrame(frameDetail3.id); + + VirtualProc proc3 = VirtualProc.build(host, frame3); + proc3.frameId = frame3.id; + procDao.insertVirtualProc(proc3); + + procDao.updateProcMemoryUsage(frame3, 3145728, 3145728, 3145728, 3145728, 0, 0, 0, + children); + layerDao.updateLayerMaxRSS(frame3, 300000, true); + + procDao.balanceUnderUtilizedProcs(proc3, 100000); + procDao.increaseReservedMemory(proc3, this.MEM_RESERVED_DEFAULT + 100000); + + // Check the target proc + VirtualProc targetProc = procDao.getVirtualProc(proc3.getId()); + assertEquals(this.MEM_RESERVED_DEFAULT + 100000, targetProc.memoryReserved); + + // Check other procs + VirtualProc firstProc = procDao.getVirtualProc(proc1.getId()); + assertEquals(this.MEM_RESERVED_DEFAULT - 50000 - 1, firstProc.memoryReserved); + + VirtualProc secondProc = procDao.getVirtualProc(proc2.getId()); + assertEquals(this.MEM_RESERVED_DEFAULT - 50000 - 1, secondProc.memoryReserved); - @Resource - AdminManager adminManager; - - @Resource - Dispatcher dispatcher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - ProcSearchFactory procSearchFactory; - - private static String PK_ALLOC = "00000000-0000-0000-0000-000000000000"; - - private long MEM_RESERVED_DEFAULT; - private long MEM_GPU_RESERVED_DEFAULT; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder().setName("beta").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB32).setTotalSwap(20960) - .setNimbyEnabled(false).setNumProcs(8).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - - return hostDao.findDispatchHost("beta"); - } - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Before - public void setDispatcherTestMode() { - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - this.MEM_RESERVED_DEFAULT = - env.getRequiredProperty("dispatcher.memory.mem_reserved_default", Long.class); - this.MEM_GPU_RESERVED_DEFAULT = - env.getRequiredProperty("dispatcher.memory.mem_gpu_reserved_default", Long.class); - } - - @Test - @Transactional - @Rollback(true) - public void testDontVerifyRunningProc() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); - VirtualProc proc = VirtualProc.build(host, frame); - dispatcher.dispatch(frame, proc); - - // Confirm was have a running frame. - assertEquals("RUNNING", jdbcTemplate - .queryForObject("SELECT str_state FROM frame WHERE pk_frame=?", String.class, frame.id)); - - assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); - jobManager.shutdownJob(job); - - int result = jdbcTemplate.update("UPDATE job SET ts_stopped = " - + "current_timestamp - interval '10' minute " + "WHERE pk_job=?", job.id); - - assertEquals(1, result); - assertFalse(procDao.verifyRunningProc(proc.getId(), frame.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertVirtualProc() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteVirtualProc() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - procDao.deleteVirtualProc(proc); - } - - @Test - @Transactional - @Rollback(true) - public void testClearVirtualProcAssignment() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; 
- proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - procDao.clearVirtualProcAssignment(proc); - } - - @Test - @Transactional - @Rollback(true) - public void testClearVirtualProcAssignmentByFrame() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - assertTrue(procDao.clearVirtualProcAssignment(frame)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateVirtualProcAssignment() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame1 = frameDao.findFrameDetail(job, "0001-pass_1"); - FrameDetail frame2 = frameDao.findFrameDetail(job, "0002-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame1.id; - proc.layerId = frame1.layerId; - proc.showId = frame1.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame1.getId()); - - proc.frameId = frame2.id; - - procDao.updateVirtualProcAssignment(proc); - procDao.verifyRunningProc(proc.getId(), frame2.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateProcMemoryUsage() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - byte[] children = new byte[100]; - - procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000, 0, 0, 0, children); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetVirtualProc() { - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); - VirtualProc proc = VirtualProc.build(host, frame); - dispatcher.dispatch(frame, proc); - - assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM proc WHERE pk_proc=?", Integer.class, proc.id)); - - VirtualProc verifyProc = procDao.getVirtualProc(proc.getId()); - assertEquals(host.allocationId, verifyProc.allocationId); - assertEquals(proc.coresReserved, verifyProc.coresReserved); - assertEquals(proc.frameId, verifyProc.frameId); - assertEquals(proc.hostId, verifyProc.hostId); - assertEquals(proc.id, verifyProc.id); - assertEquals(proc.jobId, verifyProc.jobId); - assertEquals(proc.layerId, verifyProc.layerId); - 
assertEquals(proc.showId, verifyProc.showId); - } - - @Test - @Transactional - @Rollback(true) - public void testFindVirtualProc() { - - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.findVirtualProc(frame); - } - - @Test - @Transactional - @Rollback(true) - public void testFindVirtualProcs() { - - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - assertEquals(1, procDao.findVirtualProcs(HardwareState.UP).size()); - assertEquals(1, procDao.findVirtualProcs(host).size()); - assertEquals(1, procDao.findVirtualProcs(job).size()); - assertEquals(1, procDao.findVirtualProcs(frame).size()); - assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create(job)).size()); - assertEquals(1, - procDao.findVirtualProcs(frameSearchFactory.create((LayerInterface) frame)).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindOrphanedVirtualProcs() { - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT COUNT(*) FROM host WHERE pk_host=?", Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - assertEquals(0, procDao.findOrphanedVirtualProcs().size()); - - /** - * This is destructive to running jobs - */ - jdbcTemplate.update("UPDATE proc SET ts_ping = (current_timestamp - interval '30' day)"); - - assertEquals(1, procDao.findOrphanedVirtualProcs().size()); - assertTrue(procDao.isOrphan(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnbookProc() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.unbookProc(proc); - assertTrue(jdbcTemplate.queryForObject("SELECT b_unbooked FROM proc WHERE pk_proc=?", - Boolean.class, proc.id)); - } - - @Test - @Transactional - 
@Rollback(true) - public void testUnbookVirtualProcs() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - List procs = new ArrayList(); - procs.add(proc); - - procDao.unbookVirtualProcs(procs); - - assertTrue(jdbcTemplate.queryForObject("SELECT b_unbooked FROM proc WHERE pk_proc=?", - Boolean.class, proc.id)); - } - - @Test(expected = ResourceReservationFailureException.class) - @Transactional - @Rollback(true) - public void testIncreaseReservedMemoryFail() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.increaseReservedMemory(proc, 8173264l * 8); - } - - @Test - @Transactional - @Rollback(true) - public void testIncreaseReservedMemory() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.increaseReservedMemory(proc, 3145728); - } - - @Test - @Transactional - @Rollback(true) - public void testGetReservedMemory() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(this.MEM_RESERVED_DEFAULT), jdbcTemplate - .queryForObject("SELECT int_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); - assertEquals(this.MEM_RESERVED_DEFAULT, procDao.getReservedMemory(_proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetReservedGpuMemory() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(this.MEM_GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( - "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); - assertEquals(this.MEM_GPU_RESERVED_DEFAULT, procDao.getReservedGpuMemory(_proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testBalanceUnderUtilizedProcs() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); 
- - FrameDetail frameDetail1 = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame1 = frameDao.getDispatchFrame(frameDetail1.id); - - VirtualProc proc1 = VirtualProc.build(host, frame1); - proc1.frameId = frame1.id; - procDao.insertVirtualProc(proc1); - - byte[] children = new byte[100]; - procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000, 0, 0, 0, children); - layerDao.updateLayerMaxRSS(frame1, 250000, true); - - FrameDetail frameDetail2 = frameDao.findFrameDetail(job, "0002-pass_1"); - DispatchFrame frame2 = frameDao.getDispatchFrame(frameDetail2.id); - - VirtualProc proc2 = VirtualProc.build(host, frame2); - proc2.frameId = frame2.id; - procDao.insertVirtualProc(proc2); - - procDao.updateProcMemoryUsage(frame2, 255000, 255000, 255000, 255000, 0, 0, 0, children); - layerDao.updateLayerMaxRSS(frame2, 255000, true); - - FrameDetail frameDetail3 = frameDao.findFrameDetail(job, "0003-pass_1"); - DispatchFrame frame3 = frameDao.getDispatchFrame(frameDetail3.id); - - VirtualProc proc3 = VirtualProc.build(host, frame3); - proc3.frameId = frame3.id; - procDao.insertVirtualProc(proc3); - - procDao.updateProcMemoryUsage(frame3, 3145728, 3145728, 3145728, 3145728, 0, 0, 0, children); - layerDao.updateLayerMaxRSS(frame3, 300000, true); - - procDao.balanceUnderUtilizedProcs(proc3, 100000); - procDao.increaseReservedMemory(proc3, this.MEM_RESERVED_DEFAULT + 100000); - - // Check the target proc - VirtualProc targetProc = procDao.getVirtualProc(proc3.getId()); - assertEquals(this.MEM_RESERVED_DEFAULT + 100000, targetProc.memoryReserved); - - // Check other procs - VirtualProc firstProc = procDao.getVirtualProc(proc1.getId()); - assertEquals(this.MEM_RESERVED_DEFAULT - 50000 - 1, firstProc.memoryReserved); - - VirtualProc secondProc = procDao.getVirtualProc(proc2.getId()); - assertEquals(this.MEM_RESERVED_DEFAULT - 50000 - 1, secondProc.memoryReserved); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentShowId() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(job.getShowId(), procDao.getCurrentShowId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentJobId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(job.getJobId(), procDao.getCurrentJobId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentLayerId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(frame.getLayerId(), procDao.getCurrentLayerId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentFrameId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - 
FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(frame.getFrameId(), procDao.getCurrentFrameId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void getProcsBySearch() { - DispatchHost host = createHost(); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - /* - * Book 5 procs. - */ - for (int i = 1; i < 6; i++) { - FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1", i)); - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = f.id; - proc.layerId = f.layerId; - proc.showId = f.showId; - proc.childProcesses = "".getBytes(); - procDao.insertVirtualProc(proc); } - ProcSearchInterface r; - - /* - * Search for all 5 running procs - */ - r = procSearchFactory.create(); - r.addSort(new Sort("proc.ts_booked", Direction.ASC)); - ProcSearchCriteria criteriaA = r.getCriteria(); - r.setCriteria(criteriaA.toBuilder().addShows("pipe").build()); - assertEquals(5, procDao.findVirtualProcs(r).size()); - - /* - * Limit the result to 1 result. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaB = r.getCriteria(); - r.setCriteria(criteriaB.toBuilder().addShows("pipe").addMaxResults(1).build()); - assertEquals(1, procDao.findVirtualProcs(r).size()); - - /* - * Change the first result to 1, which should limt the result to 4. 
- */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaC = r.getCriteria(); - r.setCriteria(criteriaC.toBuilder().addShows("pipe").setFirstResult(2).build()); - r.addSort(new Sort("proc.ts_booked", Direction.ASC)); - assertEquals(4, procDao.findVirtualProcs(r).size()); - - /* - * Now try to do the eqivalent of a limit/offset - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaD = r.getCriteria(); - r.setCriteria( - criteriaD.toBuilder().addShows("pipe").setFirstResult(3).addMaxResults(2).build()); - assertEquals(2, procDao.findVirtualProcs(r).size()); - - } - - @Test - @Transactional - @Rollback(true) - public void testVirtualProcWithSelfishService() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - frame.minCores = 250; - frame.threadable = true; - - // Frame from a non-selfish sevice - VirtualProc proc = VirtualProc.build(host, frame, "something-else"); - assertEquals(250, proc.coresReserved); - - // When no selfish service config is provided - proc = VirtualProc.build(host, frame); - assertEquals(250, proc.coresReserved); - - // Frame with a selfish service - proc = VirtualProc.build(host, frame, "shell", "something-else"); - assertEquals(800, proc.coresReserved); - } + @Test + @Transactional + @Rollback(true) + public void testGetCurrentShowId() { + + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(job.getShowId(), procDao.getCurrentShowId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentJobId() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(job.getJobId(), procDao.getCurrentJobId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentLayerId() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(frame.getLayerId(), procDao.getCurrentLayerId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCurrentFrameId() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + + VirtualProc proc = VirtualProc.build(host, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); + + assertEquals(frame.getFrameId(), procDao.getCurrentFrameId(proc)); + } + + @Test + @Transactional + @Rollback(true) + public void getProcsBySearch() { + DispatchHost host = createHost(); + + jobLauncher.launch(new 
File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + /* + * Book 5 procs. + */ + for (int i = 1; i < 6; i++) { + FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1", i)); + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = host.id; + proc.hostName = host.name; + proc.jobId = job.id; + proc.frameId = f.id; + proc.layerId = f.layerId; + proc.showId = f.showId; + proc.childProcesses = "".getBytes(); + procDao.insertVirtualProc(proc); + } + + ProcSearchInterface r; + + /* + * Search for all 5 running procs + */ + r = procSearchFactory.create(); + r.addSort(new Sort("proc.ts_booked", Direction.ASC)); + ProcSearchCriteria criteriaA = r.getCriteria(); + r.setCriteria(criteriaA.toBuilder().addShows("pipe").build()); + assertEquals(5, procDao.findVirtualProcs(r).size()); + + /* + * Limit the result to 1 result. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaB = r.getCriteria(); + r.setCriteria(criteriaB.toBuilder().addShows("pipe").addMaxResults(1).build()); + assertEquals(1, procDao.findVirtualProcs(r).size()); + + /* + * Change the first result to 1, which should limt the result to 4. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaC = r.getCriteria(); + r.setCriteria(criteriaC.toBuilder().addShows("pipe").setFirstResult(2).build()); + r.addSort(new Sort("proc.ts_booked", Direction.ASC)); + assertEquals(4, procDao.findVirtualProcs(r).size()); + + /* + * Now try to do the eqivalent of a limit/offset + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaD = r.getCriteria(); + r.setCriteria( + criteriaD.toBuilder().addShows("pipe").setFirstResult(3).addMaxResults(2).build()); + assertEquals(2, procDao.findVirtualProcs(r).size()); + + } + + @Test + @Transactional + @Rollback(true) + public void testVirtualProcWithSelfishService() { + DispatchHost host = createHost(); + JobDetail job = launchJob(); + + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + frame.minCores = 250; + frame.threadable = true; + + // Frame from a non-selfish sevice + VirtualProc proc = VirtualProc.build(host, frame, "something-else"); + assertEquals(250, proc.coresReserved); + + // When no selfish service config is provided + proc = VirtualProc.build(host, frame); + assertEquals(250, proc.coresReserved); + + // Frame with a selfish service + proc = VirtualProc.build(host, frame, "shell", "something-else"); + assertEquals(800, proc.coresReserved); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java index 0172d007e..db209f22b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java @@ -40,193 +40,195 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ServiceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - ServiceDao serviceDao; - - @Test - @Transactional - @Rollback(true) - public void testGetService() { - ServiceEntity s1 = serviceDao.get("default"); - ServiceEntity 
s2 = serviceDao.get("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); - assertEquals(s1, s2); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] {"general"})); - s.minMemoryIncrease = CueUtil.GB4; - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] {"general"})); - s.minMemoryIncrease = CueUtil.GB; - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - - s.name = "smacktest"; - s.minCores = 200; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB8; - s.minGpuMemory = CueUtil.GB2; - s.threadable = true; - s.tags = Sets.newLinkedHashSet(); - s.tags.add("linux"); - s.minMemoryIncrease = CueUtil.GB4 + CueUtil.GB2; - - serviceDao.update(s); - ServiceEntity s1 = serviceDao.get(s.getId()); - - assertEquals(s.name, s1.name); - assertEquals(s.minCores, s1.minCores); - assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.threadable, s1.threadable); - assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); - assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] {"general"})); - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - - serviceDao.delete(s); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM service WHERE pk_service=?", Integer.class, s.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] {"general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB2; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] {"general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); - - s.name = "smacktest"; - s.minCores = 200; - s.timeout = 10; - s.timeout_llu = 10; - s.minMemory = CueUtil.GB8; - 
s.minGpuMemory = CueUtil.GB4; - s.threadable = true; - s.tags = Sets.newLinkedHashSet(); - s.tags.add("linux"); - s.minMemoryIncrease = CueUtil.GB4; - - serviceDao.update(s); - ServiceEntity s1 = serviceDao.getOverride(s.getId()); - - assertEquals(s.name, s1.name); - assertEquals(s.minCores, s1.minCores); - assertEquals(s.timeout, s1.timeout); - assertEquals(s.timeout_llu, s1.timeout_llu); - assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.minGpuMemory, s1.minGpuMemory); - assertEquals(s.threadable, s1.threadable); - assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); - assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); - assertEquals(s1.minMemoryIncrease, CueUtil.GB4); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] {"general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - s.minMemoryIncrease = CueUtil.GB2; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); - serviceDao.delete(s); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM show_service WHERE pk_show_service=?", Integer.class, s.getId())); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + ServiceDao serviceDao; + + @Test + @Transactional + @Rollback(true) + public void testGetService() { + ServiceEntity s1 = serviceDao.get("default"); + ServiceEntity s2 = serviceDao.get("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); + assertEquals(s1, s2); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.minMemoryIncrease = CueUtil.GB4; + + serviceDao.insert(s); + assertEquals(s, serviceDao.get("dillweed")); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.minMemoryIncrease = CueUtil.GB; + + serviceDao.insert(s); + assertEquals(s, serviceDao.get("dillweed")); + + s.name = "smacktest"; + s.minCores = 200; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB8; + s.minGpuMemory = CueUtil.GB2; + s.threadable = true; + s.tags = Sets.newLinkedHashSet(); + s.tags.add("linux"); + s.minMemoryIncrease = CueUtil.GB4 + CueUtil.GB2; + + serviceDao.update(s); + ServiceEntity s1 = serviceDao.get(s.getId()); + + assertEquals(s.name, s1.name); + assertEquals(s.minCores, s1.minCores); + assertEquals(s.minMemory, s1.minMemory); + assertEquals(s.threadable, s1.threadable); + assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); + assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores 
= 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.get("dillweed")); + + serviceDao.delete(s); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM service WHERE pk_service=?", Integer.class, s.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertServiceOverride() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.getOverride("dillweed")); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateServiceOverride() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB2; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.getOverride("dillweed")); + assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); + + s.name = "smacktest"; + s.minCores = 200; + s.timeout = 10; + s.timeout_llu = 10; + s.minMemory = CueUtil.GB8; + s.minGpuMemory = CueUtil.GB4; + s.threadable = true; + s.tags = Sets.newLinkedHashSet(); + s.tags.add("linux"); + s.minMemoryIncrease = CueUtil.GB4; + + serviceDao.update(s); + ServiceEntity s1 = serviceDao.getOverride(s.getId()); + + assertEquals(s.name, s1.name); + assertEquals(s.minCores, s1.minCores); + assertEquals(s.timeout, s1.timeout); + assertEquals(s.timeout_llu, s1.timeout_llu); + assertEquals(s.minMemory, s1.minMemory); + assertEquals(s.minGpuMemory, s1.minGpuMemory); + assertEquals(s.threadable, s1.threadable); + assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); + assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); + assertEquals(s1.minMemoryIncrease, CueUtil.GB4); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteServiceOverride() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB; + s.threadable = false; + s.tags.addAll(Sets.newHashSet(new String[] {"general"})); + s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; + + serviceDao.insert(s); + assertEquals(s, serviceDao.getOverride("dillweed")); + assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); + serviceDao.delete(s); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject( + "SELECT COUNT(1) FROM show_service WHERE pk_show_service=?", Integer.class, + s.getId())); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java index 99dea7b6a..294238d88 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java +++ 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java @@ -46,166 +46,173 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ShowDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - ShowDao showDao; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - private static String SHOW_ID = "00000000-0000-0000-0000-000000000000"; - private static String SHOW_NAME = "pipe"; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("general") - .setState(HardwareState.UP).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testFindShowDetail() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - assertEquals(SHOW_ID, show.id); - assertEquals(SHOW_NAME, show.name); - assertFalse(show.paused); - } - - @Test(expected = EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindShowDetailByHost() { - // TODO: Add code to setup a host and make the sow - // prefer the host, then check result again. 
- showDao.getShowDetail(createHost()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShowDetail() { - ShowEntity show = showDao.getShowDetail(SHOW_ID); - assertEquals(SHOW_ID, show.id); - assertEquals(SHOW_NAME, show.name); - assertFalse(show.paused); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertShow() { - ShowEntity show = new ShowEntity(); - show.name = "uber"; - showDao.insertShow(show); - - assertEquals(Integer.valueOf(1), jdbcTemplate - .queryForObject("SELECT count(*) FROM show where str_name=?", Integer.class, show.name)); - - ShowEntity newShow = showDao.findShowDetail(show.name); - assertEquals(newShow.id, show.id); - assertEquals(newShow.name, show.name); - assertFalse(show.paused); - } - - @Test - @Transactional - @Rollback(true) - public void testShowExists() { - assertFalse(showDao.showExists("uber")); - assertTrue(showDao.showExists("pipe")); - assertTrue(showDao.showExists("fx")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowDefaultMinCores() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowDefaultMinCores(show, 100); - assertTrue(jdbcTemplate.queryForObject("SELECT int_default_min_cores FROM show WHERE pk_show=?", - Integer.class, show.id) == 100); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowDefaultMaxCores() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowDefaultMaxCores(show, 1000); - assertTrue(jdbcTemplate.queryForObject("SELECT int_default_max_cores FROM show WHERE pk_show=?", - Integer.class, show.id) == 1000); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowCommentEmail() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowCommentEmail(show, new String[] {"test@imageworks.com"}); - String email = jdbcTemplate.queryForObject("SELECT str_comment_email FROM show WHERE pk_show=?", - String.class, show.id); - assertEquals("test@imageworks.com", email); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateBookingEnabled() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateBookingEnabled(show, false); - assertFalse(jdbcTemplate.queryForObject("SELECT b_booking_enabled FROM show WHERE pk_show=?", - Boolean.class, show.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateActive() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateActive(show, false); - assertFalse(jdbcTemplate.queryForObject("SELECT b_active FROM show WHERE pk_show=?", - Boolean.class, show.id)); - showDao.updateActive(show, true); - assertTrue(jdbcTemplate.queryForObject("SELECT b_active FROM show WHERE pk_show=?", - Boolean.class, show.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameCounters() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - int frameSuccess = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); - showDao.updateFrameCounters(show, 0); - int frameSucces2 = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); - assertEquals(frameSuccess + 1, frameSucces2); - - int frameFail = jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); - showDao.updateFrameCounters(show, 1); - int frameFail2 = jdbcTemplate.queryForObject( - "SELECT 
int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); - assertEquals(frameFail + 1, frameFail2); - } + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + ShowDao showDao; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + private static String SHOW_ID = "00000000-0000-0000-0000-000000000000"; + private static String SHOW_NAME = "pipe"; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(false).setNumProcs(2) + .setCoresPerProc(100).addTags("general").setState(HardwareState.UP) + .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + @Test + @Transactional + @Rollback(true) + public void testFindShowDetail() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + assertEquals(SHOW_ID, show.id); + assertEquals(SHOW_NAME, show.name); + assertFalse(show.paused); + } + + @Test(expected = EmptyResultDataAccessException.class) + @Transactional + @Rollback(true) + public void testFindShowDetailByHost() { + // TODO: Add code to setup a host and make the sow + // prefer the host, then check result again. + showDao.getShowDetail(createHost()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetShowDetail() { + ShowEntity show = showDao.getShowDetail(SHOW_ID); + assertEquals(SHOW_ID, show.id); + assertEquals(SHOW_NAME, show.name); + assertFalse(show.paused); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertShow() { + ShowEntity show = new ShowEntity(); + show.name = "uber"; + showDao.insertShow(show); + + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT count(*) FROM show where str_name=?", Integer.class, show.name)); + + ShowEntity newShow = showDao.findShowDetail(show.name); + assertEquals(newShow.id, show.id); + assertEquals(newShow.name, show.name); + assertFalse(show.paused); + } + + @Test + @Transactional + @Rollback(true) + public void testShowExists() { + assertFalse(showDao.showExists("uber")); + assertTrue(showDao.showExists("pipe")); + assertTrue(showDao.showExists("fx")); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShowDefaultMinCores() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateShowDefaultMinCores(show, 100); + assertTrue(jdbcTemplate.queryForObject( + "SELECT int_default_min_cores FROM show WHERE pk_show=?", Integer.class, + show.id) == 100); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShowDefaultMaxCores() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateShowDefaultMaxCores(show, 1000); + assertTrue(jdbcTemplate.queryForObject( + "SELECT int_default_max_cores FROM show WHERE pk_show=?", Integer.class, + show.id) == 1000); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateShowCommentEmail() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateShowCommentEmail(show, new String[] {"test@imageworks.com"}); + String email = 
jdbcTemplate.queryForObject( + "SELECT str_comment_email FROM show WHERE pk_show=?", String.class, show.id); + assertEquals("test@imageworks.com", email); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateBookingEnabled() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateBookingEnabled(show, false); + assertFalse(jdbcTemplate.queryForObject( + "SELECT b_booking_enabled FROM show WHERE pk_show=?", Boolean.class, show.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateActive() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + showDao.updateActive(show, false); + assertFalse(jdbcTemplate.queryForObject("SELECT b_active FROM show WHERE pk_show=?", + Boolean.class, show.id)); + showDao.updateActive(show, true); + assertTrue(jdbcTemplate.queryForObject("SELECT b_active FROM show WHERE pk_show=?", + Boolean.class, show.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateFrameCounters() { + ShowEntity show = showDao.findShowDetail(SHOW_NAME); + int frameSuccess = jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, + show.id); + showDao.updateFrameCounters(show, 0); + int frameSucces2 = jdbcTemplate.queryForObject( + "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, + show.id); + assertEquals(frameSuccess + 1, frameSucces2); + + int frameFail = jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, + show.id); + showDao.updateFrameCounters(show, 1); + int frameFail2 = jdbcTemplate.queryForObject( + "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, + show.id); + assertEquals(frameFail + 1, frameFail2); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java index 92fd02ce7..9ecaa036e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/SubscriptionDaoTests.java @@ -48,199 +48,203 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class SubscriptionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - AllocationDao allocDao; + @Resource + AllocationDao allocDao; - @Resource - SubscriptionDao subscriptionDao; + @Resource + SubscriptionDao subscriptionDao; - @Resource - AllocationDao allocationDao; + @Resource + AllocationDao allocationDao; - @Resource - ShowDao showDao; + @Resource + ShowDao showDao; - @Resource - FacilityDao facilityDao; + @Resource + FacilityDao facilityDao; - public static final String SUB_NAME = "test.pipe"; - public static final String ALLOC_NAME = "test"; + public static final String SUB_NAME = "test.pipe"; + public static final String ALLOC_NAME = "test"; - private AllocationEntity alloc; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } + private AllocationEntity alloc; + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } - public SubscriptionEntity buildSubscription(ShowInterface t, 
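The testUpdateFrameCounters case above pins down the counter semantics of ShowDao.updateFrameCounters: passing 0 bumps int_frame_success_count and passing 1 bumps int_frame_fail_count, so the second argument behaves like a frame exit status (an inference from the two values the test exercises). A minimal sketch, assuming the showDao and jdbcTemplate fields already injected into ShowDaoTests:

    // Illustrative fragment, not part of the reformatting patch.
    ShowEntity show = showDao.findShowDetail("pipe");

    showDao.updateFrameCounters(show, 0);   // increments int_frame_success_count by 1
    showDao.updateFrameCounters(show, 1);   // increments int_frame_fail_count by 1

    int success = jdbcTemplate.queryForObject(
            "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?",
            Integer.class, show.id);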
AllocationInterface a) { - SubscriptionEntity s = new SubscriptionEntity(); - s.allocationId = a.getId(); - s.showId = t.getId(); - s.burst = 500; - s.size = 100; - return s; - } + public SubscriptionEntity buildSubscription(ShowInterface t, AllocationInterface a) { + SubscriptionEntity s = new SubscriptionEntity(); + s.allocationId = a.getId(); + s.showId = t.getId(); + s.burst = 500; + s.size = 100; + return s; + } - public AllocationEntity buildAllocation() { - AllocationEntity a = new AllocationEntity(); - a.tag = "test"; - a.name = ALLOC_NAME; - a.facilityId = facilityDao.getDefaultFacility().getFacilityId(); - return a; - } + public AllocationEntity buildAllocation() { + AllocationEntity a = new AllocationEntity(); + a.tag = "test"; + a.name = ALLOC_NAME; + a.facilityId = facilityDao.getDefaultFacility().getFacilityId(); + return a; + } - @Before - public void before() { - alloc = new AllocationEntity(); - alloc.name = ALLOC_NAME; - alloc.tag = "test"; - allocationDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); - } + @Before + public void before() { + alloc = new AllocationEntity(); + alloc.name = ALLOC_NAME; + alloc.tag = "test"; + allocationDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); + } - @Test - @Transactional - @Rollback(true) - public void testHasRunningProcs() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - assertFalse(subscriptionDao.hasRunningProcs(s)); - } + @Test + @Transactional + @Rollback(true) + public void testHasRunningProcs() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + assertFalse(subscriptionDao.hasRunningProcs(s)); + } - @Test - @Transactional - @Rollback(true) - public void testIsShowOverSize() { + @Test + @Transactional + @Rollback(true) + public void testIsShowOverSize() { - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - - assertFalse(this.subscriptionDao.isShowOverSize(getShow(), alloc)); - - jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 100, - sub.getSubscriptionId()); - - assertFalse(subscriptionDao.isShowOverSize(getShow(), alloc)); - - jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 101, - sub.getSubscriptionId()); - - assertEquals(true, subscriptionDao.isShowOverSize(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowAtOrOverSize() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - assertFalse(this.subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - - jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 100, - sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - - jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 200, - sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowOverBurst() { - subscriptionDao.insertSubscription(buildSubscription(getShow(), alloc)); - - // Burst is 500 so 600 would be over burst. - assertTrue(subscriptionDao.isShowOverBurst(getShow(), alloc, 600)); - - // Burst is 500 so 300 would be under burst. 
- assertFalse(subscriptionDao.isShowOverBurst(getShow(), alloc, 300)); - } - - @Test(expected = org.springframework.jdbc.UncategorizedSQLException.class) - @Transactional - @Rollback(true) - public void testIsShowAtOrOverBurst() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - assertFalse(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - - jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 500, - sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - - jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 501, - sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionDetail() { - - FacilityInterface f = facilityDao.getDefaultFacility(); - - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - assertNotNull(s.id); - assertNotNull(s.getId()); - - SubscriptionEntity s1 = subscriptionDao.getSubscriptionDetail(s.getSubscriptionId()); - - assertEquals(alloc.getName() + ".pipe", s1.name); - assertEquals(s.burst, s1.burst); - assertEquals(s.id, s1.id); - assertEquals(s.size, s1.size); - assertEquals(s.allocationId, s1.allocationId); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertSubscription() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteSubscription() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.deleteSubscription(s); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSubscriptionSize() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.updateSubscriptionSize(s, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_size FROM subscription WHERE pk_subscription=?", Integer.class, s.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSubscriptionBurst() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.updateSubscriptionBurst(s, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_burst FROM subscription WHERE pk_subscription=?", Integer.class, s.getId())); - } + SubscriptionEntity sub = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(sub); + + assertFalse(this.subscriptionDao.isShowOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 100, + sub.getSubscriptionId()); + + assertFalse(subscriptionDao.isShowOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 101, + sub.getSubscriptionId()); + + assertEquals(true, subscriptionDao.isShowOverSize(getShow(), alloc)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsShowAtOrOverSize() { + + SubscriptionEntity sub = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(sub); + assertFalse(this.subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? 
WHERE pk_subscription = ?", 100, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 200, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); + } + + @Test + @Transactional + @Rollback(true) + public void testIsShowOverBurst() { + subscriptionDao.insertSubscription(buildSubscription(getShow(), alloc)); + + // Burst is 500 so 600 would be over burst. + assertTrue(subscriptionDao.isShowOverBurst(getShow(), alloc, 600)); + + // Burst is 500 so 300 would be under burst. + assertFalse(subscriptionDao.isShowOverBurst(getShow(), alloc, 300)); + } + + @Test(expected = org.springframework.jdbc.UncategorizedSQLException.class) + @Transactional + @Rollback(true) + public void testIsShowAtOrOverBurst() { + + SubscriptionEntity sub = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(sub); + assertFalse(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 500, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); + + jdbcTemplate.update("UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", 501, + sub.getSubscriptionId()); + + assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetSubscriptionDetail() { + + FacilityInterface f = facilityDao.getDefaultFacility(); + + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + assertNotNull(s.id); + assertNotNull(s.getId()); + + SubscriptionEntity s1 = subscriptionDao.getSubscriptionDetail(s.getSubscriptionId()); + + assertEquals(alloc.getName() + ".pipe", s1.name); + assertEquals(s.burst, s1.burst); + assertEquals(s.id, s1.id); + assertEquals(s.size, s1.size); + assertEquals(s.allocationId, s1.allocationId); + } + + @Test + @Transactional + @Rollback(true) + public void testInsertSubscription() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteSubscription() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + subscriptionDao.deleteSubscription(s); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSubscriptionSize() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + subscriptionDao.updateSubscriptionSize(s, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject( + "SELECT int_size FROM subscription WHERE pk_subscription=?", Integer.class, + s.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateSubscriptionBurst() { + SubscriptionEntity s = buildSubscription(getShow(), alloc); + subscriptionDao.insertSubscription(s); + subscriptionDao.updateSubscriptionBurst(s, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject( + "SELECT int_burst FROM subscription WHERE pk_subscription=?", Integer.class, + s.getId())); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java index 60f6c911c..79545b409 100644 --- 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/TaskDaoTests.java @@ -47,214 +47,221 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class TaskDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - ShowDao showDao; + @Resource + ShowDao showDao; - @Resource - DepartmentDao departmentDao; + @Resource + DepartmentDao departmentDao; - @Resource - TaskDao taskDao; + @Resource + TaskDao taskDao; - @Resource - PointDao pointDao; + @Resource + PointDao pointDao; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Before - public void testMode() { - jobLauncher.testMode = true; - } + @Before + public void testMode() { + jobLauncher.testMode = true; + } - @Test - @Transactional - @Rollback(true) - public void insertTask() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + @Test + @Transactional + @Rollback(true) + public void insertTask() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - String dept = jdbcTemplate.queryForObject("SELECT pk_dept FROM job WHERE pk_job=?", - String.class, job.getJobId()); + String dept = jdbcTemplate.queryForObject("SELECT pk_dept FROM job WHERE pk_job=?", + String.class, job.getJobId()); - // Add in a new task, the job should switch to using this task. - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDepartment(dept)); + // Add in a new task, the job should switch to using this task. 
+ PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDepartment(dept)); - TaskEntity t = new TaskEntity(p, "dev.foo", 100); - taskDao.insertTask(t); - - t = taskDao.getTaskDetail(t.id); - taskDao.deleteTask(t); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTask() { - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - TaskEntity t = new TaskEntity(p, "dev.cue", 100); - taskDao.insertTask(t); - taskDao.deleteTask(t); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTasksByShowAndDepartment() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - int task_count = jdbcTemplate.queryForObject("SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - - assertEquals(Integer.valueOf(task_count + 1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); - - taskDao.deleteTasks(p); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTasksByDepartmentConfig() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - t.minCoreUnits = 100; - taskDao.insertTask(t); - - taskDao.deleteTasks(p); - - /** - * This is always - */ - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void getTaskDetail() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - assertEquals(newTask.id, t.id); - } - - @Test - @Transactional - @Rollback(true) - public void getTaskDetailByDept() { - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(departmentDao.getDefaultDepartment(), "dev.cue"); - assertEquals(newTask.id, t.id); - } - - @Test - @Transactional - @Rollback(true) - public void updateTaskMinProcs() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - t.minCoreUnits = 100; - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - taskDao.updateTaskMinCores(newTask, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void adjustTaskMinProcs() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - t.minCoreUnits = 10; - taskDao.insertTask(t); - TaskEntity newTask = 
taskDao.getTaskDetail(t.getTaskId()); - taskDao.updateTaskMinCores(newTask, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - - taskDao.adjustTaskMinCores(t, 105); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - assertEquals(Integer.valueOf(5), jdbcTemplate.queryForObject( - "SELECT int_adjust_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - - taskDao.adjustTaskMinCores(t, 50); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - assertEquals(Integer.valueOf(-50), jdbcTemplate.queryForObject( - "SELECT int_adjust_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void mergeTask() { - - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, t.getTaskId())); - - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - newTask.minCoreUnits = 200; - taskDao.mergeTask(newTask); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void isJobManaged() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - assertFalse(taskDao.isManaged(job)); - } + TaskEntity t = new TaskEntity(p, "dev.foo", 100); + taskDao.insertTask(t); + + t = taskDao.getTaskDetail(t.id); + taskDao.deleteTask(t); + } + + @Test + @Transactional + @Rollback(true) + public void deleteTask() { + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + TaskEntity t = new TaskEntity(p, "dev.cue", 100); + taskDao.insertTask(t); + taskDao.deleteTask(t); + } + + @Test + @Transactional + @Rollback(true) + public void deleteTasksByShowAndDepartment() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + int task_count = jdbcTemplate.queryForObject("SELECT COUNT(*) FROM task WHERE pk_point=?", + Integer.class, p.getPointId()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + taskDao.insertTask(t); + + assertEquals(Integer.valueOf(task_count + 1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); + + taskDao.deleteTasks(p); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); + } + + @Test + @Transactional + @Rollback(true) + public void deleteTasksByDepartmentConfig() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + t.minCoreUnits = 100; + taskDao.insertTask(t); + + taskDao.deleteTasks(p); + + /** + * This is always + */ + 
assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM task WHERE pk_point=?", Integer.class, p.getPointId())); + } + + @Test + @Transactional + @Rollback(true) + public void getTaskDetail() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + assertEquals(newTask.id, t.id); + } + + @Test + @Transactional + @Rollback(true) + public void getTaskDetailByDept() { + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(departmentDao.getDefaultDepartment(), "dev.cue"); + assertEquals(newTask.id, t.id); + } + + @Test + @Transactional + @Rollback(true) + public void updateTaskMinProcs() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + t.minCoreUnits = 100; + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + taskDao.updateTaskMinCores(newTask, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + } + + @Test + @Transactional + @Rollback(true) + public void adjustTaskMinProcs() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + t.minCoreUnits = 10; + taskDao.insertTask(t); + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + taskDao.updateTaskMinCores(newTask, 100); + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + + taskDao.adjustTaskMinCores(t, 105); + + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + assertEquals(Integer.valueOf(5), + jdbcTemplate.queryForObject("SELECT int_adjust_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + + taskDao.adjustTaskMinCores(t, 50); + + assertEquals(Integer.valueOf(100), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + assertEquals(Integer.valueOf(-50), + jdbcTemplate.queryForObject("SELECT int_adjust_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + } + + @Test + @Transactional + @Rollback(true) + public void mergeTask() { + + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + taskDao.insertTask(t); + + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_min_cores FROM task WHERE pk_task=?", Integer.class, t.getTaskId())); + + TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); + newTask.minCoreUnits = 200; + taskDao.mergeTask(newTask); + + assertEquals(Integer.valueOf(200), + jdbcTemplate.queryForObject("SELECT int_min_cores FROM task WHERE pk_task=?", + Integer.class, newTask.getTaskId())); + } + + @Test + @Transactional + @Rollback(true) 
+ public void isJobManaged() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + assertFalse(taskDao.isManaged(job)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java index 70109b7f8..07cc41779 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java @@ -126,1214 +126,1216 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class WhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; - @Resource - AllocationDao allocationDao; + @Resource + AllocationDao allocationDao; - @Resource - HostDao hostDao; + @Resource + HostDao hostDao; - @Resource - WhiteboardDao whiteboardDao; + @Resource + WhiteboardDao whiteboardDao; - @Resource - ShowDao showDao; + @Resource + ShowDao showDao; - @Resource - FilterDao filterDao; + @Resource + FilterDao filterDao; - @Resource - ProcDao procDao; + @Resource + ProcDao procDao; - @Resource - MatcherDao matcherDao; + @Resource + MatcherDao matcherDao; - @Resource - ActionDao actionDao; + @Resource + ActionDao actionDao; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - GroupDao groupDao; + @Resource + GroupDao groupDao; - @Resource - LayerDao layerDao; + @Resource + LayerDao layerDao; - @Resource - LimitDao limitDao; + @Resource + LimitDao limitDao; - @Resource - DepartmentDao departmentDao; + @Resource + DepartmentDao departmentDao; - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - PointDao pointDao; - - @Resource - HostManager hostManager; - - @Resource - CommentManager commentManager; - - @Resource - DepartmentManager departmentManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - OwnerManager ownerManager; - - @Resource - BookingManager bookingManager; - - @Resource - ServiceManager serviceManager; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - HostSearchFactory hostSearchFactory; - - @Resource - JobSearchFactory jobSearchFactory; - - @Resource - ProcSearchFactory procSearchFactory; - - private static final String HOST = "testest"; - private static final String SHOW = "pipe"; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public ShowEntity getShow() { - return showDao.findShowDetail(SHOW); - } - - public FilterEntity createFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = "Default"; - filter.showId = getShow().id; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - filterDao.insertFilter(filter); - return filter; - } - - public MatcherEntity createMatcher(FilterEntity f) { - MatcherEntity matcher = new MatcherEntity(); - matcher.filterId = f.id; - matcher.name = null; - matcher.showId = getShow().getId(); - matcher.subject = MatchSubject.JOB_NAME; - matcher.type = MatchType.CONTAINS; - matcher.value = "testuser"; - matcherDao.insertMatcher(matcher); - return 
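The adjustTaskMinProcs case in the TaskDaoTests hunk above is the clearest statement of how the two core-count mutators differ: updateTaskMinCores writes int_min_cores directly, while adjustTaskMinCores leaves int_min_cores untouched and stores the difference from it in int_adjust_cores (105 yields +5 and 50 yields -50 against a minimum of 100). A minimal sketch, assuming the pointDao, departmentDao, showDao and taskDao fields injected into TaskDaoTests:

    // Illustrative fragment, not part of the reformatting patch.
    PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"),
            departmentDao.getDefaultDepartment());
    TaskEntity t = new TaskEntity(p, "dev.cue");
    taskDao.insertTask(t);

    taskDao.updateTaskMinCores(taskDao.getTaskDetail(t.getTaskId()), 100); // int_min_cores = 100
    taskDao.adjustTaskMinCores(t, 105);  // int_min_cores stays 100, int_adjust_cores = 5
    taskDao.adjustTaskMinCores(t, 50);   // int_min_cores stays 100, int_adjust_cores = -50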
matcher; - } - - public ActionEntity createAction(FilterEntity f) { - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.booleanValue = true; - a1.name = null; - a1.valueType = ActionValueType.BOOLEAN_TYPE; - actionDao.createAction(a1); - return a1; - } - - public RenderHost getRenderHost() { - // Hardcoded value of dispatcher.memory.mem_reserved_min - // to avoid having to read opencue.properties on a test setting - long memReservedMin = 262144; - RenderHost host = RenderHost.newBuilder().setName(HOST).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem((int) memReservedMin * 4).setFreeSwap(2076).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) memReservedMin * 4).setTotalSwap(2096) - .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(400).setState(HardwareState.DOWN) - .setFacility("spi").setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512) - .build(); - return host; - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - public JobDetail launchLimitJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_limit.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - private void createTestLimits() { - limitDao.createLimit("util", 15); - limitDao.createLimit("arnold", 20); - } - - @Test - @Transactional - @Rollback(true) - public void getService() { - whiteboardDao.getService("arnold"); - } - - @Test - @Transactional - @Rollback(true) - public void getServices() { - whiteboardDao.getDefaultServices(); - } - - @Test - @Transactional - @Rollback(true) - public void getServiceOverride() { - - ShowEntity show = getShow(); - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "test"; - s.minCores = 100; - s.timeout = 0; - s.timeout_llu = 0; - s.minMemory = 320000; - s.tags.add("general"); - s.threadable = false; - s.showId = show.getId(); - s.minMemoryIncrease = CueUtil.GB4; - - serviceManager.createService(s); - whiteboardDao.getServiceOverride(getShow(), "test"); - } - - @Test - @Transactional - @Rollback(true) - public void getServiceOverrides() { - whiteboardDao.getServiceOverrides(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepend() { - - List depends = dependManager.getWhatDependsOn(launchJob()); - for (LightweightDependency depend : depends) { - whiteboardDao.getDepend(depend); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGetDependById() { - - List depends = dependManager.getWhatDependsOn(launchJob()); - for (LightweightDependency depend : depends) { - whiteboardDao.getDepend(depend); - whiteboardDao.getDepend(depend.id); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnThis() { - JobDetail job = launchJob(); - assertEquals(1, whiteboardDao.getWhatDependsOnThis(job).getDependsCount()); - - LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); - assertEquals(0, whiteboardDao.getWhatDependsOnThis(layer1).getDependsCount()); - - LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); - assertEquals(1, whiteboardDao.getWhatDependsOnThis(layer2).getDependsCount()); - - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); - 
assertEquals(0, whiteboardDao.getWhatDependsOnThis(frame).getDependsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatThisDependsOn() { - JobDetail job = launchJob(); - assertEquals(0, whiteboardDao.getWhatThisDependsOn(job).getDependsCount()); - - LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); - assertEquals(1, whiteboardDao.getWhatThisDependsOn(layer1).getDependsCount()); - - LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); - assertEquals(0, whiteboardDao.getWhatThisDependsOn(layer2).getDependsCount()); - - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(1, whiteboardDao.getWhatThisDependsOn(frame).getDependsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepends() { - JobDetail job = launchJob(); - assertEquals(1, whiteboardDao.getDepends(job).getDependsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCommentsOnJob() { - JobDetail job = launchJob(); - assertEquals(0, whiteboardDao.getComments(job).getCommentsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCommentsOnHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); - - CommentDetail c = new CommentDetail(); - c.message = "you suck"; - c.subject = "a useful message"; - c.user = "testuser"; - c.timestamp = null; - - commentManager.addComment(hd, c); - assertEquals(1, whiteboardDao.getComments(hd).getCommentsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilter() { - createFilter(); - whiteboardDao.findFilter(getShow(), "Default"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilter() { - whiteboardDao.getFilter(createFilter()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatchers() { - FilterEntity f = createFilter(); - createMatcher(f); - whiteboardDao.getMatchers(f); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatcher() { - FilterEntity f = createFilter(); - MatcherEntity m = createMatcher(f); - whiteboardDao.getMatcher(m); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActions() { - FilterEntity f = createFilter(); - createAction(f); - whiteboardDao.getActions(f); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAction() { - FilterEntity f = createFilter(); - whiteboardDao.getAction(createAction(f)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilters() { - createFilter(); - whiteboardDao.getFilters(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFramesByFrameSearch() { - JobEntity job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().setPage(1).setLimit(5).addLayers("pass_1").build()); - assertEquals(5, whiteboardDao.getFrames(r).getFramesCount()); - for (Frame f : whiteboardDao.getFrames(r).getFramesList()) { - assertEquals(f.getLayerName(), "pass_1"); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayers() { - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - 
proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0)); - dispatchSupport.updateUsageCounters(frame, 0); - whiteboardDao.getLayers(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimits() { - createTestLimits(); - List limits = whiteboardDao.getLimits(); - assertEquals(limits.size(), 2); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerLimits() { - createTestLimits(); - JobDetail job = launchLimitJob(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - List limits = whiteboardDao.getLimits(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).getName(), "arnold"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimit() { - String limitName = "testing"; - int limitMaxValue = 20; - String limitId = limitDao.createLimit(limitName, limitMaxValue); - Limit limit = whiteboardDao.getLimit(limitId); - assertEquals(limit.getName(), limitName); - assertEquals(limit.getMaxValue(), limitMaxValue); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLimit() { - String limitName = "testing"; - int limitMaxValue = 20; - String limitId = limitDao.createLimit(limitName, limitMaxValue); - Limit limit = whiteboardDao.findLimit(limitName); - assertEquals(limit.getName(), limitName); - assertEquals(limit.getMaxValue(), limitMaxValue); - assertEquals(limit.getId(), limitId); - } - - @Test - @Transactional - @Rollback(true) - public void testStopFrameUpdatesLayerMaxRSS() { - long max_rss = 123456L; - - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - // Note use of 4-arg stopFrame here to update max rss. 
- assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); - dispatchSupport.updateUsageCounters(frame, 0); - Layer layer = whiteboardDao.getLayer(frame.layerId); - assertEquals(max_rss, layer.getLayerStats().getMaxRss()); - } - - @Test - @Transactional - @Rollback(true) - public void testStopFrameUpdatesJobMaxRSS() { - long max_rss = 123456L; - - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - // Note use of 4-arg stopFrame here to update max rss. - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); - dispatchSupport.updateUsageCounters(frame, 0); - Job grpc_job = whiteboardDao.getJob(job.id); - assertEquals(max_rss, grpc_job.getJobStats().getMaxRss()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobs() { - launchJob(); - JobSearchCriteria r = JobSearchInterface.criteriaFactory(); - r = r.toBuilder().addShows("pipe").build(); - whiteboardDao.getJobs(jobSearchFactory.create(r)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobNames() { - launchJob(); - JobSearchCriteria r = JobSearchInterface.criteriaFactory(); - r = r.toBuilder().addShows("pipe").build(); - whiteboardDao.getJobNames(jobSearchFactory.create(r)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetUpdatedFrames() { - final JobDetail job = launchJob(); - List jobs = new ArrayList(); - - jobs.add(new JobInterface() { - public String getJobId() { - return job.getId(); - } - - public String getShowId() { - return null; - } - - public String getId() { - return job.getId(); - } - - public String getName() { - return null; - } - - public String getFacilityId() { - throw new RuntimeException("not implemented"); - } - }); - - whiteboardDao.getUpdatedFrames(job, new ArrayList(), - (int) (System.currentTimeMillis() / 1000)); - - } - - @Test(expected = IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testGetUpdatedFramesFailure() { - final JobDetail job = launchJob(); - List jobs = new ArrayList(); - - jobs.add(new JobInterface() { - public String getJobId() { - return job.getId(); - } - - public String getShowId() { - return null; - } - - public String getId() { - return job.getId(); - } - - public String getName() { - return null; - } - - public String getFacilityId() { - throw new RuntimeException("not implemented"); - } - }); - - // this one should fail - whiteboardDao.getUpdatedFrames(job, new ArrayList(), - (int) (System.currentTimeMillis() / 1000 - 1000000)); - } - - @Test - @Transactional - @Rollback(true) - public void testFindJob() { - JobDetail job = launchJob(); - whiteboardDao.findJob(job.name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJob() { - JobDetail job = launchJob(); - whiteboardDao.getJob(job.id); - } - - 
@Test - @Transactional - @Rollback(true) - public void testGetSubscriptionByID() { - whiteboardDao.getSubscription("00000000-0000-0000-0000-000000000001"); - } - - @Test - @Transactional - @Rollback(true) - public void findFindSubscription() { - whiteboardDao.findSubscription("pipe", "spi.general"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptions() { - whiteboardDao.getSubscriptions(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionsByAllocation() { - whiteboardDao.getSubscriptions(allocationDao.findAllocationEntity("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShow() { - whiteboardDao.getShow(getShow().id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindShow() { - whiteboardDao.findShow(getShow().name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShows() { - whiteboardDao.getShows(); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActiveShows() { - whiteboardDao.getActiveShows(); - } - - @Test - @Transactional - @Rollback(true) - public void testFindHost() { - - try { - HostEntity h = hostManager.findHostDetail(HOST); - hostManager.deleteHost(h); - } catch (Exception e) { - } - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); - Host h = whiteboardDao.findHost(host.getName()); - assertEquals(host.getName(), h.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHosts() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - HostSearchCriteria h = HostSearchInterface.criteriaFactory(); - h = h.toBuilder().addHosts(HOST).build(); - assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHostsByAllocation() { - RenderHost host = getRenderHost(); - AllocationEntity alloc = - allocationDao.getAllocationEntity("00000000-0000-0000-0000-000000000006"); - DispatchHost hd = hostManager.createHost(host, alloc); - - HostSearchCriteria h = HostSearchInterface.criteriaFactory(); - h = h.toBuilder().addAllocs(alloc.getName()).build(); - assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocation() { - whiteboardDao.getAllocation("00000000-0000-0000-0000-000000000000"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation() { - whiteboardDao.findAllocation("spi.general"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocations() { - whiteboardDao.getAllocations(); - } - - @Test - @Transactional - @Rollback(true) - public void testGetRootGroup() { - whiteboardDao.getRootGroup(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroup() { - whiteboardDao.getGroup("A0000000-0000-0000-0000-000000000000"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroups() { - whiteboardDao.getGroups(getShow()); - whiteboardDao.getGroup(groupDao.getRootGroupId(getShow())); - whiteboardDao.getGroups(groupDao.getRootGroupDetail(getShow())); - } - - @Test - @Transactional - @Rollback(true) - public void testFindGroup() { - whiteboardDao.findGroup("pipe", "pipe"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobDetail 
job = launchJob(); - whiteboardDao.findFrame(job.name, "pass_1", 1); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilterByName() { - createFilter(); - whiteboardDao.findFilter("pipe", "Default"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayer() { - JobDetail job = launchJob(); - whiteboardDao.findLayer(job.name, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartment() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - - Department d = whiteboardDao.getDepartment(show, dept.getName()); - - assertEquals("pipe.Unknown", d.getName()); - assertEquals("Unknown", d.getDept()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartments() { - ShowInterface show = showDao.findShowDetail("pipe"); - whiteboardDao.getDepartments(show); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartmentNames() { - assertTrue(whiteboardDao.getDepartmentNames().size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testGetTasks() { - whiteboardDao.getTasks(showDao.findShowDetail("pipe"), departmentDao.getDefaultDepartment()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetTask() { - PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - departmentManager.createTask(t); - - whiteboardDao.getTask(showDao.findShowDetail("pipe"), departmentDao.getDefaultDepartment(), - "dev.cue"); - } - - @Test - @Transactional - @Rollback(true) - public void getFrame() { - JobDetail job = launchJob(); - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1_preprocess"); - assertEquals(1, whiteboardDao.getFrame(frame.getFrameId()).getNumber()); - } - - @Test - @Transactional - @Rollback(true) - public void getLayer() { - JobDetail job = launchJob(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - assertEquals(layer.getName(), whiteboardDao.getLayer(layer.getLayerId()).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void getHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - assertEquals(hd.getName(), whiteboardDao.getHost(proc.getHostId()).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void getProcs() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - assertEquals(1, whiteboardDao.getProcs(proc).getProcsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getProcsBySearch() { - 
RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - /* - * Book 5 procs. - */ - for (int i = 1; i < 6; i++) { - FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1", i)); - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = f.id; - proc.layerId = f.layerId; - proc.showId = f.showId; - procDao.insertVirtualProc(proc); - } - - ProcSearchInterface r; - - /* - * Search for all 5 running procs - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().addShows("pipe").build()); - assertEquals(5, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Limit the result to 1 result. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaA = r.getCriteria(); - r.setCriteria(criteriaA.toBuilder().addShows("pipe").addMaxResults(1).build()); - assertEquals(1, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Change the first result to 1, which should limit the result to 4. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaB = r.getCriteria(); - r.setCriteria(criteriaB.toBuilder().addShows("pipe").setFirstResult(2).build()); - assertEquals(4, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Now try to do the equivalent of a limit/offset - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaC = r.getCriteria(); - r.setCriteria( - criteriaC.toBuilder().addShows("pipe").setFirstResult(3).addMaxResults(2).build()); - assertEquals(2, whiteboardDao.getProcs(r).getProcsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwner() { - ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - whiteboardDao.getOwner("spongebob"); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnersByShow() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - - assertTrue(whiteboardDao.getOwners(showDao.findShowDetail("pipe")).size() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getDeedsByShow() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - assertTrue(whiteboardDao.getDeeds(showDao.findShowDetail("pipe")).getDeedsCount() != 0); - - assertTrue(whiteboardDao.getDeeds(showDao.findShowDetail("pipe")).getDeedsCount() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getDeedsByOwner() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - assertTrue(whiteboardDao.getDeeds(owner).getDeedsCount() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getHostsByOwner() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = 
ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - ownerManager.takeOwnership(owner, hd); + @Resource + DependManager dependManager; - assertEquals(1, whiteboardDao.getHosts(owner).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnerFromDeed() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - DeedEntity deed = ownerManager.takeOwnership(owner, hd); - - Owner o2 = whiteboardDao.getOwner(deed); - - assertEquals(owner.getName(), o2.getName()); - assertEquals(1, o2.getHostCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnerFromHost() { + @Resource + FrameDao frameDao; - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); - ownerManager.takeOwnership(owner, hd); - - Owner o2 = whiteboardDao.getOwner(hd); - - assertEquals(owner.getName(), o2.getName()); - assertEquals(1, o2.getHostCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getRenderPartition() { + @Resource + PointDao pointDao; - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); + @Resource + HostManager hostManager; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); - bookingManager.createLocalHostAssignment(hd, job, lba); + @Resource + CommentManager commentManager; - whiteboardDao.getRenderPartition(lba); - } - - @Test - @Transactional - @Rollback(true) - public void getRenderPartitionsByHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); - bookingManager.createLocalHostAssignment(hd, job, lba); - - assertEquals(1, whiteboardDao.getRenderPartitions(hd).getRenderPartitionsCount()); - - } - - @Test - @Transactional - @Rollback(true) - public void getFacility() { - whiteboardDao.getFacilities(); - whiteboardDao.getFacility("spi"); - } - - @Test - @Transactional - @Rollback(true) - public void getFrameWithNoDisplayOverride() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); - assertEquals(false, retrievedFrame.hasFrameStateDisplayOverride()); - } - - public FrameStateDisplayOverride createFrameStateDisplayOverride(String frameId) { - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder() - .setState(FrameState.SUCCEEDED).setText("FINISHED").setColor(FrameStateDisplayOverride.RGB - .newBuilder().setRed(114).setGreen(42).setBlue(200).build()) - .build(); - frameDao.setFrameStateDisplayOverride(frameId, override); - - return override; - } - - @Test - @Transactional - @Rollback(true) - public void testFramesWithDisplayOverride() { - // since current_timestamp does not update, we need to make sure the - // timestamp we use when retrieving updated frames is older than 
when - // the frame's ts_updated value is set to during insertion. - long timestamp = System.currentTimeMillis(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - // Create override - FrameStateDisplayOverride override = createFrameStateDisplayOverride(frame.getFrameId()); - FrameStateDisplayOverrideSeq results = - frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - - // Test GET_FRAME - Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); - assertTrue(retrievedFrame.hasFrameStateDisplayOverride()); - assertEquals(override, retrievedFrame.getFrameStateDisplayOverride()); - - // Test GET_UPDATED_FRAME - UpdatedFrameCheckResult rs = whiteboardDao.getUpdatedFrames(job, - new ArrayList(), (int) (timestamp / 1000)); - UpdatedFrameSeq uFrames = rs.getUpdatedFrames(); - // We'll end up getting all the frames for the job so we need to find - // the one we want. - for (UpdatedFrame uFrame : uFrames.getUpdatedFramesList()) { - if (uFrame.getId().equals(frame.getFrameId())) { - assertTrue(uFrame.hasFrameStateDisplayOverride()); - assertEquals(override, uFrame.getFrameStateDisplayOverride()); - break; - } - } - - // Test GET_FRAMES_CRITERIA - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria( - criteria.toBuilder().setPage(1).setLimit(5).addLayers("pass_1_preprocess").build()); - FrameSeq frames = whiteboardDao.getFrames(r); - Frame fcFrame = frames.getFrames(0); - assertTrue(fcFrame.hasFrameStateDisplayOverride()); - assertEquals(override, fcFrame.getFrameStateDisplayOverride()); - } + @Resource + DepartmentManager departmentManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + OwnerManager ownerManager; + + @Resource + BookingManager bookingManager; + + @Resource + ServiceManager serviceManager; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + HostSearchFactory hostSearchFactory; + + @Resource + JobSearchFactory jobSearchFactory; + + @Resource + ProcSearchFactory procSearchFactory; + + private static final String HOST = "testest"; + private static final String SHOW = "pipe"; + + @Before + public void testMode() { + jobLauncher.testMode = true; + } + + public ShowEntity getShow() { + return showDao.findShowDetail(SHOW); + } + + public FilterEntity createFilter() { + FilterEntity filter = new FilterEntity(); + filter.name = "Default"; + filter.showId = getShow().id; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + filterDao.insertFilter(filter); + return filter; + } + + public MatcherEntity createMatcher(FilterEntity f) { + MatcherEntity matcher = new MatcherEntity(); + matcher.filterId = f.id; + matcher.name = null; + matcher.showId = getShow().getId(); + matcher.subject = MatchSubject.JOB_NAME; + matcher.type = MatchType.CONTAINS; + matcher.value = "testuser"; + matcherDao.insertMatcher(matcher); + return matcher; + } + + public ActionEntity createAction(FilterEntity f) { + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.PAUSE_JOB; + a1.filterId = f.getFilterId(); + a1.booleanValue = true; + a1.name = null; + a1.valueType = ActionValueType.BOOLEAN_TYPE; + actionDao.createAction(a1); + return a1; + } + + public RenderHost getRenderHost() { + // Hardcoded value of dispatcher.memory.mem_reserved_min + // to avoid having to read 
opencue.properties on a test setting + long memReservedMin = 262144; + RenderHost host = RenderHost.newBuilder().setName(HOST).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) memReservedMin * 4).setFreeSwap(2076) + .setLoad(1).setTotalMcp(CueUtil.GB4).setTotalMem((int) memReservedMin * 4) + .setTotalSwap(2096).setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(400) + .setState(HardwareState.DOWN).setFacility("spi").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + return host; + } + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + public JobDetail launchLimitJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_limit.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + private void createTestLimits() { + limitDao.createLimit("util", 15); + limitDao.createLimit("arnold", 20); + } + + @Test + @Transactional + @Rollback(true) + public void getService() { + whiteboardDao.getService("arnold"); + } + + @Test + @Transactional + @Rollback(true) + public void getServices() { + whiteboardDao.getDefaultServices(); + } + + @Test + @Transactional + @Rollback(true) + public void getServiceOverride() { + + ShowEntity show = getShow(); + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "test"; + s.minCores = 100; + s.timeout = 0; + s.timeout_llu = 0; + s.minMemory = 320000; + s.tags.add("general"); + s.threadable = false; + s.showId = show.getId(); + s.minMemoryIncrease = CueUtil.GB4; + + serviceManager.createService(s); + whiteboardDao.getServiceOverride(getShow(), "test"); + } + + @Test + @Transactional + @Rollback(true) + public void getServiceOverrides() { + whiteboardDao.getServiceOverrides(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepend() { + + List depends = dependManager.getWhatDependsOn(launchJob()); + for (LightweightDependency depend : depends) { + whiteboardDao.getDepend(depend); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGetDependById() { + + List depends = dependManager.getWhatDependsOn(launchJob()); + for (LightweightDependency depend : depends) { + whiteboardDao.getDepend(depend); + whiteboardDao.getDepend(depend.id); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGetWhatDependsOnThis() { + JobDetail job = launchJob(); + assertEquals(1, whiteboardDao.getWhatDependsOnThis(job).getDependsCount()); + + LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); + assertEquals(0, whiteboardDao.getWhatDependsOnThis(layer1).getDependsCount()); + + LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); + assertEquals(1, whiteboardDao.getWhatDependsOnThis(layer2).getDependsCount()); + + FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); + assertEquals(0, whiteboardDao.getWhatDependsOnThis(frame).getDependsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetWhatThisDependsOn() { + JobDetail job = launchJob(); + assertEquals(0, whiteboardDao.getWhatThisDependsOn(job).getDependsCount()); + + LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); + assertEquals(1, whiteboardDao.getWhatThisDependsOn(layer1).getDependsCount()); + + LayerInterface 
layer2 = layerDao.findLayer(job, "pass_1_preprocess"); + assertEquals(0, whiteboardDao.getWhatThisDependsOn(layer2).getDependsCount()); + + FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); + assertEquals(1, whiteboardDao.getWhatThisDependsOn(frame).getDependsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepends() { + JobDetail job = launchJob(); + assertEquals(1, whiteboardDao.getDepends(job).getDependsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCommentsOnJob() { + JobDetail job = launchJob(); + assertEquals(0, whiteboardDao.getComments(job).getCommentsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetCommentsOnHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); + + CommentDetail c = new CommentDetail(); + c.message = "you suck"; + c.subject = "a useful message"; + c.user = "testuser"; + c.timestamp = null; + + commentManager.addComment(hd, c); + assertEquals(1, whiteboardDao.getComments(hd).getCommentsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFilter() { + createFilter(); + whiteboardDao.findFilter(getShow(), "Default"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilter() { + whiteboardDao.getFilter(createFilter()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatchers() { + FilterEntity f = createFilter(); + createMatcher(f); + whiteboardDao.getMatchers(f); + } + + @Test + @Transactional + @Rollback(true) + public void testGetMatcher() { + FilterEntity f = createFilter(); + MatcherEntity m = createMatcher(f); + whiteboardDao.getMatcher(m); + } + + @Test + @Transactional + @Rollback(true) + public void testGetActions() { + FilterEntity f = createFilter(); + createAction(f); + whiteboardDao.getActions(f); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAction() { + FilterEntity f = createFilter(); + whiteboardDao.getAction(createAction(f)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFilters() { + createFilter(); + whiteboardDao.getFilters(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetFramesByFrameSearch() { + JobEntity job = launchJob(); + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().setPage(1).setLimit(5).addLayers("pass_1").build()); + assertEquals(5, whiteboardDao.getFrames(r).getFramesCount()); + for (Frame f : whiteboardDao.getFrames(r).getFramesList()) { + assertEquals(f.getLayerName(), "pass_1"); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayers() { + JobDetail job = launchJob(); + whiteboardDao.getLayers(job); + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); + dispatcher.setTestMode(true); + dispatcher.dispatch(dframe, proc); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + 
e.printStackTrace(); + } + + dframe = frameDao.getDispatchFrame(frame.getId()); + + assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0)); + dispatchSupport.updateUsageCounters(frame, 0); + whiteboardDao.getLayers(job); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimits() { + createTestLimits(); + List limits = whiteboardDao.getLimits(); + assertEquals(limits.size(), 2); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerLimits() { + createTestLimits(); + JobDetail job = launchLimitJob(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + List limits = whiteboardDao.getLimits(layer); + assertEquals(limits.size(), 1); + assertEquals(limits.get(0).getName(), "arnold"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLimit() { + String limitName = "testing"; + int limitMaxValue = 20; + String limitId = limitDao.createLimit(limitName, limitMaxValue); + Limit limit = whiteboardDao.getLimit(limitId); + assertEquals(limit.getName(), limitName); + assertEquals(limit.getMaxValue(), limitMaxValue); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLimit() { + String limitName = "testing"; + int limitMaxValue = 20; + String limitId = limitDao.createLimit(limitName, limitMaxValue); + Limit limit = whiteboardDao.findLimit(limitName); + assertEquals(limit.getName(), limitName); + assertEquals(limit.getMaxValue(), limitMaxValue); + assertEquals(limit.getId(), limitId); + } + + @Test + @Transactional + @Rollback(true) + public void testStopFrameUpdatesLayerMaxRSS() { + long max_rss = 123456L; + + JobDetail job = launchJob(); + whiteboardDao.getLayers(job); + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); + dispatcher.setTestMode(true); + dispatcher.dispatch(dframe, proc); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + dframe = frameDao.getDispatchFrame(frame.getId()); + + // Note use of 4-arg stopFrame here to update max rss. 
+ assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); + dispatchSupport.updateUsageCounters(frame, 0); + Layer layer = whiteboardDao.getLayer(frame.layerId); + assertEquals(max_rss, layer.getLayerStats().getMaxRss()); + } + + @Test + @Transactional + @Rollback(true) + public void testStopFrameUpdatesJobMaxRSS() { + long max_rss = 123456L; + + JobDetail job = launchJob(); + whiteboardDao.getLayers(job); + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); + dispatcher.setTestMode(true); + dispatcher.dispatch(dframe, proc); + + try { + Thread.sleep(2000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + + dframe = frameDao.getDispatchFrame(frame.getId()); + + // Note use of 4-arg stopFrame here to update max rss. + assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); + dispatchSupport.updateUsageCounters(frame, 0); + Job grpc_job = whiteboardDao.getJob(job.id); + assertEquals(max_rss, grpc_job.getJobStats().getMaxRss()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobs() { + launchJob(); + JobSearchCriteria r = JobSearchInterface.criteriaFactory(); + r = r.toBuilder().addShows("pipe").build(); + whiteboardDao.getJobs(jobSearchFactory.create(r)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJobNames() { + launchJob(); + JobSearchCriteria r = JobSearchInterface.criteriaFactory(); + r = r.toBuilder().addShows("pipe").build(); + whiteboardDao.getJobNames(jobSearchFactory.create(r)); + } + + @Test + @Transactional + @Rollback(true) + public void testGetUpdatedFrames() { + final JobDetail job = launchJob(); + List jobs = new ArrayList(); + + jobs.add(new JobInterface() { + public String getJobId() { + return job.getId(); + } + + public String getShowId() { + return null; + } + + public String getId() { + return job.getId(); + } + + public String getName() { + return null; + } + + public String getFacilityId() { + throw new RuntimeException("not implemented"); + } + }); + + whiteboardDao.getUpdatedFrames(job, new ArrayList(), + (int) (System.currentTimeMillis() / 1000)); + + } + + @Test(expected = IllegalArgumentException.class) + @Transactional + @Rollback(true) + public void testGetUpdatedFramesFailure() { + final JobDetail job = launchJob(); + List jobs = new ArrayList(); + + jobs.add(new JobInterface() { + public String getJobId() { + return job.getId(); + } + + public String getShowId() { + return null; + } + + public String getId() { + return job.getId(); + } + + public String getName() { + return null; + } + + public String getFacilityId() { + throw new RuntimeException("not implemented"); + } + }); + + // this one should fail + whiteboardDao.getUpdatedFrames(job, new ArrayList(), + (int) (System.currentTimeMillis() / 1000 - 1000000)); + } + + @Test + @Transactional + @Rollback(true) + public void testFindJob() { + JobDetail job = launchJob(); + whiteboardDao.findJob(job.name); + } + + @Test + @Transactional + @Rollback(true) + public void testGetJob() { + JobDetail job = launchJob(); + whiteboardDao.getJob(job.id); + } + + 
@Test + @Transactional + @Rollback(true) + public void testGetSubscriptionByID() { + whiteboardDao.getSubscription("00000000-0000-0000-0000-000000000001"); + } + + @Test + @Transactional + @Rollback(true) + public void findFindSubscription() { + whiteboardDao.findSubscription("pipe", "spi.general"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetSubscriptions() { + whiteboardDao.getSubscriptions(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetSubscriptionsByAllocation() { + whiteboardDao.getSubscriptions(allocationDao.findAllocationEntity("spi", "general")); + } + + @Test + @Transactional + @Rollback(true) + public void testGetShow() { + whiteboardDao.getShow(getShow().id); + } + + @Test + @Transactional + @Rollback(true) + public void testFindShow() { + whiteboardDao.findShow(getShow().name); + } + + @Test + @Transactional + @Rollback(true) + public void testGetShows() { + whiteboardDao.getShows(); + } + + @Test + @Transactional + @Rollback(true) + public void testGetActiveShows() { + whiteboardDao.getActiveShows(); + } + + @Test + @Transactional + @Rollback(true) + public void testFindHost() { + + try { + HostEntity h = hostManager.findHostDetail(HOST); + hostManager.deleteHost(h); + } catch (Exception e) { + } + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); + Host h = whiteboardDao.findHost(host.getName()); + assertEquals(host.getName(), h.getName()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetHosts() { + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + HostSearchCriteria h = HostSearchInterface.criteriaFactory(); + h = h.toBuilder().addHosts(HOST).build(); + assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetHostsByAllocation() { + RenderHost host = getRenderHost(); + AllocationEntity alloc = + allocationDao.getAllocationEntity("00000000-0000-0000-0000-000000000006"); + DispatchHost hd = hostManager.createHost(host, alloc); + + HostSearchCriteria h = HostSearchInterface.criteriaFactory(); + h = h.toBuilder().addAllocs(alloc.getName()).build(); + assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAllocation() { + whiteboardDao.getAllocation("00000000-0000-0000-0000-000000000000"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindAllocation() { + whiteboardDao.findAllocation("spi.general"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetAllocations() { + whiteboardDao.getAllocations(); + } + + @Test + @Transactional + @Rollback(true) + public void testGetRootGroup() { + whiteboardDao.getRootGroup(getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroup() { + whiteboardDao.getGroup("A0000000-0000-0000-0000-000000000000"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetGroups() { + whiteboardDao.getGroups(getShow()); + whiteboardDao.getGroup(groupDao.getRootGroupId(getShow())); + whiteboardDao.getGroups(groupDao.getRootGroupDetail(getShow())); + } + + @Test + @Transactional + @Rollback(true) + public void testFindGroup() { + whiteboardDao.findGroup("pipe", "pipe"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrame() { + JobDetail 
job = launchJob(); + whiteboardDao.findFrame(job.name, "pass_1", 1); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFilterByName() { + createFilter(); + whiteboardDao.findFilter("pipe", "Default"); + } + + @Test + @Transactional + @Rollback(true) + public void testFindLayer() { + JobDetail job = launchJob(); + whiteboardDao.findLayer(job.name, "pass_1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepartment() { + ShowInterface show = showDao.findShowDetail("pipe"); + DepartmentInterface dept = departmentDao.getDefaultDepartment(); + + Department d = whiteboardDao.getDepartment(show, dept.getName()); + + assertEquals("pipe.Unknown", d.getName()); + assertEquals("Unknown", d.getDept()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepartments() { + ShowInterface show = showDao.findShowDetail("pipe"); + whiteboardDao.getDepartments(show); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDepartmentNames() { + assertTrue(whiteboardDao.getDepartmentNames().size() > 0); + } + + @Test + @Transactional + @Rollback(true) + public void testGetTasks() { + whiteboardDao.getTasks(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + } + + @Test + @Transactional + @Rollback(true) + public void testGetTask() { + PointInterface p = pointDao.getPointConfigDetail(showDao.findShowDetail("pipe"), + departmentDao.getDefaultDepartment()); + + TaskEntity t = new TaskEntity(p, "dev.cue"); + departmentManager.createTask(t); + + whiteboardDao.getTask(showDao.findShowDetail("pipe"), departmentDao.getDefaultDepartment(), + "dev.cue"); + } + + @Test + @Transactional + @Rollback(true) + public void getFrame() { + JobDetail job = launchJob(); + FrameInterface frame = frameDao.findFrame(job, "0001-pass_1_preprocess"); + assertEquals(1, whiteboardDao.getFrame(frame.getFrameId()).getNumber()); + } + + @Test + @Transactional + @Rollback(true) + public void getLayer() { + JobDetail job = launchJob(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + assertEquals(layer.getName(), whiteboardDao.getLayer(layer.getLayerId()).getName()); + } + + @Test + @Transactional + @Rollback(true) + public void getHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + assertEquals(hd.getName(), whiteboardDao.getHost(proc.getHostId()).getName()); + } + + @Test + @Transactional + @Rollback(true) + public void getProcs() { + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = frame.id; + proc.layerId = frame.layerId; + proc.showId = frame.showId; + + procDao.insertVirtualProc(proc); + assertEquals(1, whiteboardDao.getProcs(proc).getProcsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getProcsBySearch() { + 
RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + /* + * Book 5 procs. + */ + for (int i = 1; i < 6; i++) { + FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1", i)); + VirtualProc proc = new VirtualProc(); + proc.allocationId = null; + proc.coresReserved = 100; + proc.hostId = hd.id; + proc.hostName = host.getName(); + proc.jobId = job.id; + proc.frameId = f.id; + proc.layerId = f.layerId; + proc.showId = f.showId; + procDao.insertVirtualProc(proc); + } + + ProcSearchInterface r; + + /* + * Search for all 5 running procs + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().addShows("pipe").build()); + assertEquals(5, whiteboardDao.getProcs(r).getProcsCount()); + + /* + * Limit the result to 1 result. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaA = r.getCriteria(); + r.setCriteria(criteriaA.toBuilder().addShows("pipe").addMaxResults(1).build()); + assertEquals(1, whiteboardDao.getProcs(r).getProcsCount()); + + /* + * Change the first result to 1, which should limit the result to 4. + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaB = r.getCriteria(); + r.setCriteria(criteriaB.toBuilder().addShows("pipe").setFirstResult(2).build()); + assertEquals(4, whiteboardDao.getProcs(r).getProcsCount()); + + /* + * Now try to do the equivalent of a limit/offset + */ + r = procSearchFactory.create(); + ProcSearchCriteria criteriaC = r.getCriteria(); + r.setCriteria( + criteriaC.toBuilder().addShows("pipe").setFirstResult(3).addMaxResults(2).build()); + assertEquals(2, whiteboardDao.getProcs(r).getProcsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getOwner() { + ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + whiteboardDao.getOwner("spongebob"); + } + + @Test + @Transactional + @Rollback(true) + public void getOwnersByShow() { + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + + ownerManager.takeOwnership(owner, hd); + + assertTrue(whiteboardDao.getOwners(showDao.findShowDetail("pipe")).size() != 0); + } + + @Test + @Transactional + @Rollback(true) + public void getDeedsByShow() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + + ownerManager.takeOwnership(owner, hd); + assertTrue(whiteboardDao.getDeeds(showDao.findShowDetail("pipe")).getDeedsCount() != 0); + + assertTrue(whiteboardDao.getDeeds(showDao.findShowDetail("pipe")).getDeedsCount() != 0); + } + + @Test + @Transactional + @Rollback(true) + public void getDeedsByOwner() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + + ownerManager.takeOwnership(owner, hd); + assertTrue(whiteboardDao.getDeeds(owner).getDeedsCount() != 0); + } + + @Test + @Transactional + @Rollback(true) + public void getHostsByOwner() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = 
ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + ownerManager.takeOwnership(owner, hd); + + assertEquals(1, whiteboardDao.getHosts(owner).getHostsCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getOwnerFromDeed() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + DeedEntity deed = ownerManager.takeOwnership(owner, hd); + + Owner o2 = whiteboardDao.getOwner(deed); + + assertEquals(owner.getName(), o2.getName()); + assertEquals(1, o2.getHostCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getOwnerFromHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + OwnerEntity owner = ownerManager.createOwner("spongebob", showDao.findShowDetail("pipe")); + ownerManager.takeOwnership(owner, hd); + + Owner o2 = whiteboardDao.getOwner(hd); + + assertEquals(owner.getName(), o2.getName()); + assertEquals(1, o2.getHostCount()); + } + + @Test + @Transactional + @Rollback(true) + public void getRenderPartition() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); + bookingManager.createLocalHostAssignment(hd, job, lba); + + whiteboardDao.getRenderPartition(lba); + } + + @Test + @Transactional + @Rollback(true) + public void getRenderPartitionsByHost() { + + RenderHost host = getRenderHost(); + DispatchHost hd = hostManager.createHost(host); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); + bookingManager.createLocalHostAssignment(hd, job, lba); + + assertEquals(1, whiteboardDao.getRenderPartitions(hd).getRenderPartitionsCount()); + + } + + @Test + @Transactional + @Rollback(true) + public void getFacility() { + whiteboardDao.getFacilities(); + whiteboardDao.getFacility("spi"); + } + + @Test + @Transactional + @Rollback(true) + public void getFrameWithNoDisplayOverride() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); + assertEquals(false, retrievedFrame.hasFrameStateDisplayOverride()); + } + + public FrameStateDisplayOverride createFrameStateDisplayOverride(String frameId) { + FrameStateDisplayOverride override = + FrameStateDisplayOverride.newBuilder().setState(FrameState.SUCCEEDED) + .setText("FINISHED").setColor(FrameStateDisplayOverride.RGB.newBuilder() + .setRed(114).setGreen(42).setBlue(200).build()) + .build(); + frameDao.setFrameStateDisplayOverride(frameId, override); + + return override; + } + + @Test + @Transactional + @Rollback(true) + public void testFramesWithDisplayOverride() { + // since current_timestamp does not update, we need to make sure the + // timestamp we use when retrieving updated frames is older than when + // the frame's ts_updated value is set to during insertion. 
+ long timestamp = System.currentTimeMillis(); + + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + + // Create override + FrameStateDisplayOverride override = createFrameStateDisplayOverride(frame.getFrameId()); + FrameStateDisplayOverrideSeq results = + frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + + // Test GET_FRAME + Frame retrievedFrame = whiteboardDao.getFrame(frame.getFrameId()); + assertTrue(retrievedFrame.hasFrameStateDisplayOverride()); + assertEquals(override, retrievedFrame.getFrameStateDisplayOverride()); + + // Test GET_UPDATED_FRAME + UpdatedFrameCheckResult rs = whiteboardDao.getUpdatedFrames(job, + new ArrayList(), (int) (timestamp / 1000)); + UpdatedFrameSeq uFrames = rs.getUpdatedFrames(); + // We'll end up getting all the frames for the job so we need to find + // the one we want. + for (UpdatedFrame uFrame : uFrames.getUpdatedFramesList()) { + if (uFrame.getId().equals(frame.getFrameId())) { + assertTrue(uFrame.hasFrameStateDisplayOverride()); + assertEquals(override, uFrame.getFrameStateDisplayOverride()); + break; + } + } + + // Test GET_FRAMES_CRITERIA + FrameSearchInterface r = frameSearchFactory.create(job); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria( + criteria.toBuilder().setPage(1).setLimit(5).addLayers("pass_1_preprocess").build()); + FrameSeq frames = whiteboardDao.getFrames(r); + Frame fcFrame = frames.getFrames(0); + assertTrue(fcFrame.hasFrameStateDisplayOverride()); + assertEquals(override, fcFrame.getFrameStateDisplayOverride()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java index b27e04f08..cfe858ddc 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java @@ -48,141 +48,143 @@ @ContextConfiguration public class CoreUnitDispatcherGpuJobTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v1"; - - private static final String TARGET_JOB = "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpu_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8).setTotalSwap((int) CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(200).addTags("test") - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") - .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuRemovedHostToNonGpuJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); - host.idleCores = host.idleCores - Math.min(100, host.idleCores); - host.idleGpuMemory = 0; - List procs = dispatcher.dispatchHost(host, job); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToGroup() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - GroupDetail group = groupManager.getGroupDetail(job); - - List procs = dispatcher.dispatchHost(host, group); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToShowNoPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchRemovedGpuHostToShowPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host, show); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void dispatchProcToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - List procs = dispatcher.dispatchHost(host, job); - VirtualProc proc = procs.get(0); - dispatcher.dispatchProcToJob(proc, job); - } + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-middletier_shell_dispatch_gpu_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher + .launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpu_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of 
free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8) + .setTotalSwap((int) CueUtil.GB2).setNimbyEnabled(false).setNumProcs(1) + .setCoresPerProc(200).addTags("test").setState(HardwareState.UP).setFacility("spi") + .putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuRemovedHostToNonGpuJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); + host.idleCores = host.idleCores - Math.min(100, host.idleCores); + host.idleGpuMemory = 0; + List procs = dispatcher.dispatchHost(host, job); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToGroup() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + GroupDetail group = groupManager.getGroupDetail(job); + + List procs = dispatcher.dispatchHost(host, group); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToShowNoPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchRemovedGpuHostToShowPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host, show); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void dispatchProcToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + List procs = dispatcher.dispatchHost(host, job); + VirtualProc proc = procs.get(0); + dispatcher.dispatchProcToJob(proc, job); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java index 861bbd165..2bb801ffe 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java @@ -48,166 +48,167 @@ @ContextConfiguration public class CoreUnitDispatcherGpuTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - 
private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8).setTotalSwap((int) CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(200).addTags("test") - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") - .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuRemovedHostToNonGpuJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); - host.idleCores = host.idleCores - Math.min(100, host.idleCores); - host.idleGpuMemory = 0; - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToGroup() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - GroupDetail group = groupManager.getGroupDetail(job); - - List procs = dispatcher.dispatchHost(host, group); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuHostToShowNoPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchRemovedGpuHostToShowPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host, show); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostRemoveRestoreGpu() { - DispatchHost host = getHost(); - - long idleMemoryOrig = host.idleMemory; - int idleCoresOrig = host.idleCores; - long idleGpuMemoryOrig = host.idleGpuMemory; - int idleGpusOrig = host.idleGpus; - - host.removeGpu(); - assertEquals(0, host.idleGpuMemory); - assertEquals(0, host.idleGpus); - assertEquals(idleMemoryOrig - CueUtil.GB4, host.idleMemory); - assertEquals(idleCoresOrig - 100, host.idleCores); - - host.restoreGpu(); - assertEquals(idleMemoryOrig, host.idleMemory); - assertEquals(idleCoresOrig, host.idleCores); - 
assertEquals(idleGpuMemoryOrig, host.idleGpuMemory); - assertEquals(idleGpusOrig, host.idleGpus); - } - - @Test - @Transactional - @Rollback(true) - public void dispatchProcToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - host.idleGpuMemory = 0; - List procs = dispatcher.dispatchHost(host, job); - VirtualProc proc = procs.get(0); - dispatcher.dispatchProcToJob(proc, job); - } + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8) + .setTotalSwap((int) CueUtil.GB2).setNimbyEnabled(false).setNumProcs(1) + .setCoresPerProc(200).addTags("test").setState(HardwareState.UP).setFacility("spi") + .putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuRemovedHostToNonGpuJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); + host.idleCores = host.idleCores - Math.min(100, host.idleCores); + host.idleGpuMemory = 0; + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToGroup() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + GroupDetail group = groupManager.getGroupDetail(job); + + List procs = dispatcher.dispatchHost(host, group); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuHostToShowNoPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchRemovedGpuHostToShowPrefer() { + DispatchHost host = getHost(); + 
JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host, show); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostRemoveRestoreGpu() { + DispatchHost host = getHost(); + + long idleMemoryOrig = host.idleMemory; + int idleCoresOrig = host.idleCores; + long idleGpuMemoryOrig = host.idleGpuMemory; + int idleGpusOrig = host.idleGpus; + + host.removeGpu(); + assertEquals(0, host.idleGpuMemory); + assertEquals(0, host.idleGpus); + assertEquals(idleMemoryOrig - CueUtil.GB4, host.idleMemory); + assertEquals(idleCoresOrig - 100, host.idleCores); + + host.restoreGpu(); + assertEquals(idleMemoryOrig, host.idleMemory); + assertEquals(idleCoresOrig, host.idleCores); + assertEquals(idleGpuMemoryOrig, host.idleGpuMemory); + assertEquals(idleGpusOrig, host.idleGpus); + } + + @Test + @Transactional + @Rollback(true) + public void dispatchProcToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + host.idleGpuMemory = 0; + List procs = dispatcher.dispatchHost(host, job); + VirtualProc proc = procs.get(0); + dispatcher.dispatchProcToJob(proc, job); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java index b1a86da99..8de96e05b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java @@ -53,197 +53,199 @@ @ContextConfiguration public class CoreUnitDispatcherGpusJobTests extends TransactionalTest { - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - LayerDao layerDao; - - @Resource - FrameDao frameDao; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - DependManager dependManager; - - private static final String HOSTNAME = "beta"; - - private static final String CPU_JOB = "pipe-default-testuser_test_cpu"; - - private static final String GPU_JOB = "pipe-default-testuser_test_gpu"; - - private static final String GPU_OVERBOOK_JOB = "pipe-default-testuser_test_gpu_overbook"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).addTags("test") - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(8) - .setFreeGpuMem(CueUtil.GB32).setTotalGpuMem(CueUtil.GB32).build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - // All jobs are paused. procs should be empty. - assertTrue(procs.isEmpty()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchCpuJob() { - JobDetail job = jobManager.findJobDetail(CPU_JOB); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - // Cuebot doesn't dispatch non-GPU job to GPU host. procs should be empty. - assertTrue(procs.isEmpty()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuJob() { - JobDetail job = jobManager.findJobDetail(GPU_JOB); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - - /* - * The job contains 4 layers. - test_gpus_0_layer gpus=0 gpu_memory=1 - test_gpu_memory_0_layer - * gpus=1 gpu_memory=0 - test_gpus_1_layer gpus=1 gpu_memory=1 - test_gpus_4_kayer gpus=4 - * gpu_memory=7g - * - * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. Also - * job_frame_dispatch_max is 2, the procs should be test_gpus_0_layer and test_gpus_1_layer. - */ - assertEquals(2, procs.size()); - - VirtualProc proc0 = procs.get(0); - LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); - assertEquals(layer0.id, proc0.layerId); - assertEquals(100, proc0.coresReserved); - assertEquals(3355443, proc0.memoryReserved); - assertEquals(0, proc0.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - - VirtualProc proc1 = procs.get(1); - LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); - assertEquals(layer1.id, proc1.layerId); - assertEquals(100, proc1.coresReserved); - assertEquals(3355443, proc1.memoryReserved); - assertEquals(1, proc1.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuJobWithDependency() { - JobDetail job = jobManager.findJobDetail(GPU_JOB); - LayerDetail dl0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); - LayerDetail dl1 = layerDao.findLayerDetail(job, "test_gpu_memory_0_layer"); - LayerOnLayer depend = new LayerOnLayer(dl0, dl1); - dependManager.createDepend(depend); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - - /* - * The job contains 4 layers. - test_gpus_0_layer gpus=0 gpu_memory=1 - test_gpu_memory_0_layer - * gpus=1 gpu_memory=0 - test_gpus_1_layer gpus=1 gpu_memory=1 - test_gpus_4_kayer gpus=4 - * gpu_memory=7g - * - * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. And - * test_gpus_0_layer depends on test_gpu_memory_0_layer. So the procs should be - * test_gpus_1_layer and test_gpus_4_layer. 
- */ - assertEquals(2, procs.size()); - - VirtualProc proc0 = procs.get(0); - LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); - assertEquals(layer0.id, proc0.layerId); - assertEquals(100, proc0.coresReserved); - assertEquals(3355443, proc0.memoryReserved); - assertEquals(1, proc0.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - - VirtualProc proc1 = procs.get(1); - LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_4_layer"); - assertEquals(layer1.id, proc1.layerId); - assertEquals(100, proc1.coresReserved); - assertEquals(3355443, proc1.memoryReserved); - assertEquals(4, proc1.gpusReserved); - assertEquals(7340032, proc1.gpuMemoryReserved); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchGpuOverbookJob() { - JobDetail job = jobManager.findJobDetail(GPU_OVERBOOK_JOB); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host, job); - - /* - * The job contains 2 layers. - test_gpus_6_layer gpus=6 gpu_memory=1 - test_gpus_3_layer gpus=3 - * gpu_memory=1 the procs should be only test_gpus_6_layer since host only has 8 GPUs. - */ - assertEquals(1, procs.size()); - - VirtualProc proc0 = procs.get(0); - LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_6_layer"); - assertEquals(layer0.id, proc0.layerId); - assertEquals(100, proc0.coresReserved); - assertEquals(3355443, proc0.memoryReserved); - assertEquals(6, proc0.gpusReserved); - assertEquals(1048576, proc0.gpuMemoryReserved); - } + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + LayerDao layerDao; + + @Resource + FrameDao frameDao; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + DependManager dependManager; + + private static final String HOSTNAME = "beta"; + + private static final String CPU_JOB = "pipe-default-testuser_test_cpu"; + + private static final String GPU_JOB = "pipe-default-testuser_test_gpu"; + + private static final String GPU_OVERBOOK_JOB = "pipe-default-testuser_test_gpu_overbook"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher + .launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setNumGpus(8).setFreeGpuMem(CueUtil.GB32).setTotalGpuMem(CueUtil.GB32).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + // All jobs are paused. procs should be empty. 
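// Illustrative aside, not part of this formatting change: the jobs launched from
// jobspec_dispatch_gpus_test.xml start out paused, so a bare dispatchHost() has nothing
// to book here. The tests below that do expect bookings first unpause the one job they
// target, along these lines (a sketch reusing the resources and constants declared above):
//   JobDetail gpuJob = jobManager.findJobDetail(GPU_JOB);
//   jobManager.setJobPaused(gpuJob, false);                       // make the job bookable
//   List<VirtualProc> booked = dispatcher.dispatchHost(getHost(), gpuJob);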
+ assertTrue(procs.isEmpty()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchCpuJob() { + JobDetail job = jobManager.findJobDetail(CPU_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + // Cuebot doesn't dispatch non-GPU job to GPU host. procs should be empty. + assertTrue(procs.isEmpty()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuJob() { + JobDetail job = jobManager.findJobDetail(GPU_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 4 layers. - test_gpus_0_layer gpus=0 gpu_memory=1 - + * test_gpu_memory_0_layer gpus=1 gpu_memory=0 - test_gpus_1_layer gpus=1 gpu_memory=1 - + * test_gpus_4_kayer gpus=4 gpu_memory=7g + * + * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. Also + * job_frame_dispatch_max is 2, the procs should be test_gpus_0_layer and test_gpus_1_layer. + */ + assertEquals(2, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(0, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + + VirtualProc proc1 = procs.get(1); + LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); + assertEquals(layer1.id, proc1.layerId); + assertEquals(100, proc1.coresReserved); + assertEquals(3355443, proc1.memoryReserved); + assertEquals(1, proc1.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuJobWithDependency() { + JobDetail job = jobManager.findJobDetail(GPU_JOB); + LayerDetail dl0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); + LayerDetail dl1 = layerDao.findLayerDetail(job, "test_gpu_memory_0_layer"); + LayerOnLayer depend = new LayerOnLayer(dl0, dl1); + dependManager.createDepend(depend); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 4 layers. - test_gpus_0_layer gpus=0 gpu_memory=1 - + * test_gpu_memory_0_layer gpus=1 gpu_memory=0 - test_gpus_1_layer gpus=1 gpu_memory=1 - + * test_gpus_4_kayer gpus=4 gpu_memory=7g + * + * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. And + * test_gpus_0_layer depends on test_gpu_memory_0_layer. So the procs should be + * test_gpus_1_layer and test_gpus_4_layer. 
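     *
     * (Illustrative aside, not part of this formatting change: the reserved-memory
     * figures asserted below are kilobyte counts: 3355443 KB is roughly 3.2 GB,
     * 1048576 KB is 1 GB, and 7340032 KB is 7 GB, which is how the gpu_memory=1 and
     * gpu_memory=7g requests listed above appear in gpuMemoryReserved.)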
+ */ + assertEquals(2, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(1, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + + VirtualProc proc1 = procs.get(1); + LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_4_layer"); + assertEquals(layer1.id, proc1.layerId); + assertEquals(100, proc1.coresReserved); + assertEquals(3355443, proc1.memoryReserved); + assertEquals(4, proc1.gpusReserved); + assertEquals(7340032, proc1.gpuMemoryReserved); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuOverbookJob() { + JobDetail job = jobManager.findJobDetail(GPU_OVERBOOK_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 2 layers. - test_gpus_6_layer gpus=6 gpu_memory=1 - test_gpus_3_layer + * gpus=3 gpu_memory=1 the procs should be only test_gpus_6_layer since host only has 8 + * GPUs. + */ + assertEquals(1, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_6_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(6, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java index 9b9ddda3e..2a6e9e302 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java @@ -48,150 +48,151 @@ @ContextConfiguration public class CoreUnitDispatcherTests extends TransactionalTest { - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; - @Resource - HostManager hostManager; + @Resource + GroupManager groupManager; - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameDao frameDao; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(1).setCoresPerProc(100).addTags("test").setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux").build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHost() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testdispatchHostToAllShows() { - DispatchHost host = getHost(); - - List procs = dispatcher.dispatchHostToAllShows(host); - // The first show is removed. findDispatchJobs: shows.remove(0). - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - List procs = dispatcher.dispatchHost(host, job); - - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToGroup() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - GroupDetail group = groupManager.getGroupDetail(job); - - List procs = dispatcher.dispatchHost(host, group); - assertEquals(1, procs.size()); - - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToShowNoPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToShowPrefer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - ShowEntity show = adminManager.findShowEntity("edu"); - - List procs = dispatcher.dispatchHost(host, show); - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchProcToJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - List procs = dispatcher.dispatchHost(host, job); - VirtualProc proc = procs.get(0); - dispatcher.dispatchProcToJob(proc, job); - } + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + FrameDao frameDao; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testdispatchHostToAllShows() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHostToAllShows(host); + // The first show is removed. findDispatchJobs: shows.remove(0). + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + List procs = dispatcher.dispatchHost(host, job); + + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToGroup() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + GroupDetail group = groupManager.getGroupDetail(job); + + List procs = dispatcher.dispatchHost(host, group); + assertEquals(1, procs.size()); + + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToShowNoPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToShowPrefer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + ShowEntity show = adminManager.findShowEntity("edu"); + + List procs = dispatcher.dispatchHost(host, show); + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchProcToJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + List procs = dispatcher.dispatchHost(host, job); + VirtualProc proc = procs.get(0); + dispatcher.dispatchProcToJob(proc, job); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java index 34d9eaabc..3e94b4ecd 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java @@ -44,102 +44,102 @@ @ContextConfiguration public class DispatchSupportTests extends TransactionalTest { - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - 
DispatchSupport dispatchSupport; + @Resource + DispatchSupport dispatchSupport; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - private static final String HOSTNAME = "beta"; + private static final String HOSTNAME = "beta"; - private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(2).setCoresPerProc(400).addTags("test").setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(400).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } - @Test - @Transactional - @Rollback(true) - public void testDetermineIdleCores() { - DispatchHost host = getHost(); + @Test + @Transactional + @Rollback(true) + public void testDetermineIdleCores() { + DispatchHost host = getHost(); - int grace_load = Dispatcher.CORE_LOAD_THRESHOLD * (host.cores / 100); + int grace_load = Dispatcher.CORE_LOAD_THRESHOLD * (host.cores / 100); - // Machine is idle, no load. - dispatchSupport.determineIdleCores(host, 0); - assertEquals(800, host.idleCores); + // Machine is idle, no load. 
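// Illustrative aside, not part of this formatting change: the host registered above has
// 2 procs x 400 core units, so host.cores is 800 and host.cores / 100 is 8, giving a
// grace load of 8 * Dispatcher.CORE_LOAD_THRESHOLD. Per the assertions below, a reported
// load of 0 or of exactly grace_load leaves all 800 units idle, while a reported load of
// 200 leaves grace_load + 600 units, i.e. 800 - 200 + grace_load. Sketch of the figures:
//   int units = host.cores / 100;                           // 8 for this test host
//   int graceLoad = Dispatcher.CORE_LOAD_THRESHOLD * units; // same value as grace_load above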
+ dispatchSupport.determineIdleCores(host, 0); + assertEquals(800, host.idleCores); - // Machine is idle but shows load of 200. - host.idleCores = 800; - dispatchSupport.determineIdleCores(host, 200); - assertEquals(grace_load + 600, host.idleCores); + // Machine is idle but shows load of 200. + host.idleCores = 800; + dispatchSupport.determineIdleCores(host, 200); + assertEquals(grace_load + 600, host.idleCores); - // Machine is idle but has the grace load. - host.idleCores = 800; - dispatchSupport.determineIdleCores(host, grace_load); - assertEquals(800, host.idleCores); + // Machine is idle but has the grace load. + host.idleCores = 800; + dispatchSupport.determineIdleCores(host, grace_load); + assertEquals(800, host.idleCores); - // Machine has 100 units idle, grace_load -1 - host.idleCores = 100; - dispatchSupport.determineIdleCores(host, 700 + grace_load - 1); - assertEquals(100, host.idleCores); + // Machine has 100 units idle, grace_load -1 + host.idleCores = 100; + dispatchSupport.determineIdleCores(host, 700 + grace_load - 1); + assertEquals(100, host.idleCores); - // Machine has 100 units idle, grace_load + 1 - host.idleCores = 100; - dispatchSupport.determineIdleCores(host, 700 + grace_load + 1); - assertEquals(99, host.idleCores); - } + // Machine has 100 units idle, grace_load + 1 + host.idleCores = 100; + dispatchSupport.determineIdleCores(host, 700 + grace_load + 1); + assertEquals(99, host.idleCores); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java index 04dee5927..dfe0fb9ab 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java @@ -60,362 +60,364 @@ @ContextConfiguration public class FrameCompleteHandlerTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - FrameCompleteHandler frameCompleteHandler; - - @Resource - HostManager hostManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - ServiceManager serviceManager; - - private static final String HOSTNAME = "beta"; - private static final String HOSTNAME2 = "zeta"; - - @Before - public void setTestMode() { - - dispatcher.setTestMode(true); - } - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(8) - .setFreeGpuMem(CueUtil.GB16 * 8).setTotalGpuMem(CueUtil.GB16 * 8).build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - - RenderHost host2 = RenderHost.newBuilder().setName(HOSTNAME2).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB4).setFreeSwap((int) CueUtil.GB4) - .setLoad(0).setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8) - .setTotalSwap((int) CueUtil.GB8).setNimbyEnabled(false).setNumProcs(8).setCoresPerProc(100) - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux").build(); - - hostManager.createHost(host2, adminManager.findAllocationDetail("spi", "general")); - } - - public DispatchHost getHost(String hostname) { - return hostManager.findDispatchHost(hostname); - } - - @Test - @Transactional - @Rollback(true) - public void testGpuReport() { - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); - LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - assertEquals(7, host.idleGpus); - assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); - - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = - FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); - frameCompleteHandler.handleFrameCompleteReport(report); - - assertTrue(jobManager.isLayerComplete(layer)); - assertTrue(jobManager.isJobComplete(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testGpuReportMultiple() { - JobDetail job0 = jobManager.findJobDetail("pipe-default-testuser_test0"); - LayerDetail layer0_0 = layerDao.findLayerDetail(job0, "layer0"); - jobManager.setJobPaused(job0, false); - - JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); - LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); - jobManager.setJobPaused(job1, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(2, procs.size()); - - assertEquals(4, host.idleGpus); - assertEquals(CueUtil.GB16 * 8 - CueUtil.GB2, host.idleGpuMemory); - - for (VirtualProc proc : procs) { - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = - FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); - frameCompleteHandler.handleFrameCompleteReport(report); + @Resource + AdminManager adminManager; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + HostManager hostManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + Dispatcher 
dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + ServiceManager serviceManager; + + private static final String HOSTNAME = "beta"; + private static final String HOSTNAME2 = "zeta"; + + @Before + public void setTestMode() { + + dispatcher.setTestMode(true); + } + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100) + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setNumGpus(8).setFreeGpuMem(CueUtil.GB16 * 8).setTotalGpuMem(CueUtil.GB16 * 8) + .build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + + RenderHost host2 = RenderHost.newBuilder().setName(HOSTNAME2).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB4).setFreeSwap((int) CueUtil.GB4) + .setLoad(0).setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB8) + .setTotalSwap((int) CueUtil.GB8).setNimbyEnabled(false).setNumProcs(8) + .setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .putAttributes("SP_OS", "Linux").build(); + + hostManager.createHost(host2, adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost(String hostname) { + return hostManager.findDispatchHost(hostname); } - assertTrue(jobManager.isLayerComplete(layer0_0)); - assertTrue(jobManager.isJobComplete(job0)); - assertTrue(jobManager.isLayerComplete(layer1_0)); - assertTrue(jobManager.isJobComplete(job1)); - } - - @Test - @Transactional - @Rollback(true) - public void testGpuReportOver() { - JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); - LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); - jobManager.setJobPaused(job1, false); - - JobDetail job2 = jobManager.findJobDetail("pipe-default-testuser_test2"); - LayerDetail layer2_0 = layerDao.findLayerDetail(job2, "layer0"); - jobManager.setJobPaused(job2, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - - assertTrue(host.idleGpus == 5 || host.idleGpus == 2); - assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); - - for (VirtualProc proc : procs) { - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = - FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); - frameCompleteHandler.handleFrameCompleteReport(report); + @Test + @Transactional + @Rollback(true) + public void testGpuReport() { + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); 
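// Illustrative aside, not part of this formatting change: the host registered above
// advertises 8 GPUs and 8 * 16 GB of free GPU memory. Booking this single proc for
// layer0 is expected to reserve one GPU and 1 GB of GPU memory, which is what the next
// two assertions check:
//   idleGpus:      8 - 1         -> 7
//   idleGpuMemory: 8 * GB16 - GB -> CueUtil.GB16 * 8 - CueUtil.GB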
+ + assertEquals(7, host.idleGpus); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); + + assertTrue(jobManager.isLayerComplete(layer)); + assertTrue(jobManager.isJobComplete(job)); } - assertEquals(1, (jobManager.isLayerComplete(layer1_0) ? 1 : 0) - + (jobManager.isLayerComplete(layer2_0) ? 1 : 0)); - assertEquals(1, - (jobManager.isJobComplete(job1) ? 1 : 0) + (jobManager.isJobComplete(job2) ? 1 : 0)); - } - - private void executeDepend(FrameState frameState, int exitStatus, int dependCount, - FrameState dependState) { - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test_depend"); - LayerDetail layerFirst = layerDao.findLayerDetail(job, "layer_first"); - LayerDetail layerSecond = layerDao.findLayerDetail(job, "layer_second"); - FrameDetail frameFirst = frameDao.findFrameDetail(job, "0000-layer_first"); - FrameDetail frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); - - assertEquals(1, frameSecond.dependCount); - assertEquals(FrameState.DEPEND, frameSecond.state); - - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - assertEquals(job.getId(), proc.getJobId()); - assertEquals(layerFirst.getId(), proc.getLayerId()); - assertEquals(frameFirst.getId(), proc.getFrameId()); - - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = - FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(exitStatus).build(); - - DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - dispatchSupport.stopFrame(dispatchFrame, frameState, report.getExitStatus(), - report.getFrame().getMaxRss()); - frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, dispatchFrame, - frameState, frameDetail); - - assertTrue(jobManager.isLayerComplete(layerFirst)); - assertFalse(jobManager.isLayerComplete(layerSecond)); - - frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); - assertEquals(dependCount, frameSecond.dependCount); - assertEquals(dependState, frameSecond.state); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnSuccess() { - assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnFailure() { - assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.EATEN, -1, 1, FrameState.DEPEND); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnSuccessSatifyOnAny() { - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); - assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.SUCCEEDED, 0, 0, 
FrameState.WAITING); - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); - } - - @Test - @Transactional - @Rollback(true) - public void testDependOnFailureSatisfyOnAny() { - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); - assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); - executeDepend(FrameState.EATEN, -1, 0, FrameState.WAITING); - frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); - } - - private void executeMinMemIncrease(int expected, boolean override) { - if (override) { - ServiceOverrideEntity soe = new ServiceOverrideEntity(); - soe.showId = "00000000-0000-0000-0000-000000000000"; - soe.name = "apitest"; - soe.threadable = false; - soe.minCores = 10; - soe.minMemory = (int) CueUtil.GB2; - soe.tags = new LinkedHashSet<>(); - soe.tags.add("general"); - soe.minMemoryIncrease = (int) CueUtil.GB8; - - serviceManager.createService(soe); + @Test + @Transactional + @Rollback(true) + public void testGpuReportMultiple() { + JobDetail job0 = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer0_0 = layerDao.findLayerDetail(job0, "layer0"); + jobManager.setJobPaused(job0, false); + + JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); + LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); + jobManager.setJobPaused(job1, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(2, procs.size()); + + assertEquals(4, host.idleGpus); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB2, host.idleGpuMemory); + + for (VirtualProc proc : procs) { + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); + } + + assertTrue(jobManager.isLayerComplete(layer0_0)); + assertTrue(jobManager.isJobComplete(job0)); + assertTrue(jobManager.isLayerComplete(layer1_0)); + assertTrue(jobManager.isJobComplete(job1)); } - String jobName = "pipe-default-testuser_min_mem_test"; - JobDetail job = jobManager.findJobDetail(jobName); - LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); - FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME2); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - assertEquals(job.getId(), proc.getJobId()); - assertEquals(layer.getId(), proc.getLayerId()); - assertEquals(frame.getId(), proc.getFrameId()); - - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder().setFrame(info) - .setExitStatus(Dispatcher.EXIT_STATUS_MEMORY_FAILURE).build(); - - DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), - report.getFrame().getMaxRss()); - frameCompleteHandler.handlePostFrameCompleteOperations(proc, 
report, dispatchJob, dispatchFrame, - FrameState.WAITING, frameDetail); - - assertFalse(jobManager.isLayerComplete(layer)); - - JobDetail ujob = jobManager.findJobDetail(jobName); - LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); - assertEquals(expected, ulayer.getMinimumMemory()); - } - - private void executeMinMemIncreaseDocker(int expected, boolean override) { - if (override) { - ServiceOverrideEntity soe = new ServiceOverrideEntity(); - soe.showId = "00000000-0000-0000-0000-000000000000"; - soe.name = "apitest"; - soe.threadable = false; - soe.minCores = 10; - soe.minMemory = (int) CueUtil.GB2; - soe.tags = new LinkedHashSet<>(); - soe.tags.add("general"); - soe.minMemoryIncrease = (int) CueUtil.GB8; - - serviceManager.createService(soe); + @Test + @Transactional + @Rollback(true) + public void testGpuReportOver() { + JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); + LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); + jobManager.setJobPaused(job1, false); + + JobDetail job2 = jobManager.findJobDetail("pipe-default-testuser_test2"); + LayerDetail layer2_0 = layerDao.findLayerDetail(job2, "layer0"); + jobManager.setJobPaused(job2, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + + assertTrue(host.idleGpus == 5 || host.idleGpus == 2); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); + + for (VirtualProc proc : procs) { + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); + } + + assertEquals(1, (jobManager.isLayerComplete(layer1_0) ? 1 : 0) + + (jobManager.isLayerComplete(layer2_0) ? 1 : 0)); + assertEquals(1, (jobManager.isJobComplete(job1) ? 1 : 0) + + (jobManager.isJobComplete(job2) ? 
1 : 0)); } - String jobName = "pipe-default-testuser_min_mem_test"; - JobDetail job = jobManager.findJobDetail(jobName); - LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); - FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(HOSTNAME2); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - assertEquals(job.getId(), proc.getJobId()); - assertEquals(layer.getId(), proc.getLayerId()); - assertEquals(frame.getId(), proc.getFrameId()); - - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = FrameCompleteReport.newBuilder().setFrame(info) - .setExitStatus(Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE).build(); - - DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); - DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); - FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); - dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), - report.getFrame().getMaxRss()); - frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, dispatchFrame, - FrameState.WAITING, frameDetail); - - assertFalse(jobManager.isLayerComplete(layer)); - - JobDetail ujob = jobManager.findJobDetail(jobName); - LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); - assertEquals(expected, ulayer.getMinimumMemory()); - } - - @Test - @Transactional - @Rollback(true) - public void testMinMemIncrease() { - executeMinMemIncrease(6291456, false); - } - - @Test - @Transactional - @Rollback(true) - public void testMinMemIncreaseShowOverride() { - executeMinMemIncrease(10485760, true); - } + private void executeDepend(FrameState frameState, int exitStatus, int dependCount, + FrameState dependState) { + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test_depend"); + LayerDetail layerFirst = layerDao.findLayerDetail(job, "layer_first"); + LayerDetail layerSecond = layerDao.findLayerDetail(job, "layer_second"); + FrameDetail frameFirst = frameDao.findFrameDetail(job, "0000-layer_first"); + FrameDetail frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); + + assertEquals(1, frameSecond.dependCount); + assertEquals(FrameState.DEPEND, frameSecond.state); + + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layerFirst.getId(), proc.getLayerId()); + assertEquals(frameFirst.getId(), proc.getFrameId()); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(exitStatus).build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, frameState, report.getExitStatus(), + 
report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, + dispatchFrame, frameState, frameDetail); + + assertTrue(jobManager.isLayerComplete(layerFirst)); + assertFalse(jobManager.isLayerComplete(layerSecond)); + + frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); + assertEquals(dependCount, frameSecond.dependCount); + assertEquals(dependState, frameSecond.state); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnSuccess() { + assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnFailure() { + assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.EATEN, -1, 1, FrameState.DEPEND); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnSuccessSatifyOnAny() { + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); + assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnFailureSatisfyOnAny() { + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); + assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.EATEN, -1, 0, FrameState.WAITING); + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); + } + + private void executeMinMemIncrease(int expected, boolean override) { + if (override) { + ServiceOverrideEntity soe = new ServiceOverrideEntity(); + soe.showId = "00000000-0000-0000-0000-000000000000"; + soe.name = "apitest"; + soe.threadable = false; + soe.minCores = 10; + soe.minMemory = (int) CueUtil.GB2; + soe.tags = new LinkedHashSet<>(); + soe.tags.add("general"); + soe.minMemoryIncrease = (int) CueUtil.GB8; + + serviceManager.createService(soe); + } + + String jobName = "pipe-default-testuser_min_mem_test"; + JobDetail job = jobManager.findJobDetail(jobName); + LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); + FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME2); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layer.getId(), proc.getLayerId()); + assertEquals(frame.getId(), proc.getFrameId()); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder().setFrame(info) + .setExitStatus(Dispatcher.EXIT_STATUS_MEMORY_FAILURE).build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), + report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, + dispatchFrame, FrameState.WAITING, frameDetail); + + 
assertFalse(jobManager.isLayerComplete(layer)); + + JobDetail ujob = jobManager.findJobDetail(jobName); + LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); + assertEquals(expected, ulayer.getMinimumMemory()); + } + + private void executeMinMemIncreaseDocker(int expected, boolean override) { + if (override) { + ServiceOverrideEntity soe = new ServiceOverrideEntity(); + soe.showId = "00000000-0000-0000-0000-000000000000"; + soe.name = "apitest"; + soe.threadable = false; + soe.minCores = 10; + soe.minMemory = (int) CueUtil.GB2; + soe.tags = new LinkedHashSet<>(); + soe.tags.add("general"); + soe.minMemoryIncrease = (int) CueUtil.GB8; + + serviceManager.createService(soe); + } + + String jobName = "pipe-default-testuser_min_mem_test"; + JobDetail job = jobManager.findJobDetail(jobName); + LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); + FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME2); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layer.getId(), proc.getLayerId()); + assertEquals(frame.getId(), proc.getFrameId()); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder().setFrame(info) + .setExitStatus(Dispatcher.DOCKER_EXIT_STATUS_MEMORY_FAILURE).build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), + report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, report, dispatchJob, + dispatchFrame, FrameState.WAITING, frameDetail); + + assertFalse(jobManager.isLayerComplete(layer)); + + JobDetail ujob = jobManager.findJobDetail(jobName); + LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); + assertEquals(expected, ulayer.getMinimumMemory()); + } + + @Test + @Transactional + @Rollback(true) + public void testMinMemIncrease() { + executeMinMemIncrease(6291456, false); + } + + @Test + @Transactional + @Rollback(true) + public void testMinMemIncreaseShowOverride() { + executeMinMemIncrease(10485760, true); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java index 9eabfb1fb..97e249ef0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java @@ -51,119 +51,120 @@ @ContextConfiguration public class HistoryControlTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - FrameCompleteHandler frameCompleteHandler; - - @Resource - HostManager hostManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - LayerDao layerDao; - - @Resource - Dispatcher dispatcher; - - private static final String HOSTNAME = "beta"; - private static final String DELETE_HISTORY = - "DELETE FROM 
frame_history; " + "DELETE FROM job_history; "; - private static final String DISABLE_HISTORY = "INSERT INTO " + "config (pk_config,str_key) " - + "VALUES " + "(uuid_generate_v1(),'DISABLE_HISTORY');"; - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(8) - .setFreeGpuMem(CueUtil.GB16 * 8).setTotalGpuMem(CueUtil.GB16 * 8).build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - public void launchAndDeleteJob() { - launchJob(); - - JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); - LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); - jobManager.setJobPaused(job, false); - - DispatchHost host = getHost(); - List procs = dispatcher.dispatchHost(host); - VirtualProc proc = procs.get(0); - - RunningFrameInfo info = - RunningFrameInfo.newBuilder().setJobId(proc.getJobId()).setLayerId(proc.getLayerId()) - .setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()).build(); - FrameCompleteReport report = - FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); - frameCompleteHandler.handleFrameCompleteReport(report); - - assertTrue(jobManager.isLayerComplete(layer)); - assertTrue(jobManager.isJobComplete(job)); - - jdbcTemplate.update("DELETE FROM job WHERE pk_job=?", job.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testEnabled() { - jdbcTemplate.update(DELETE_HISTORY); - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); - - launchAndDeleteJob(); - - assertEquals(Integer.valueOf(5), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); - } - - @Test - @Transactional - @Rollback(true) - public void testDisabled() { - jdbcTemplate.update(DELETE_HISTORY); - jdbcTemplate.update(DISABLE_HISTORY); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); - - launchAndDeleteJob(); - - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); - assertEquals(Integer.valueOf(0), - jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); - } + @Resource + AdminManager adminManager; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + HostManager hostManager; + + @Resource + JobLauncher 
jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + LayerDao layerDao; + + @Resource + Dispatcher dispatcher; + + private static final String HOSTNAME = "beta"; + private static final String DELETE_HISTORY = + "DELETE FROM frame_history; " + "DELETE FROM job_history; "; + private static final String DISABLE_HISTORY = "INSERT INTO " + "config (pk_config,str_key) " + + "VALUES " + "(uuid_generate_v1(),'DISABLE_HISTORY');"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem((int) CueUtil.GB8).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(40).setCoresPerProc(100) + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setNumGpus(8).setFreeGpuMem(CueUtil.GB16 * 8).setTotalGpuMem(CueUtil.GB16 * 8) + .build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + public void launchAndDeleteJob() { + launchJob(); + + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host); + VirtualProc proc = procs.get(0); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).build(); + FrameCompleteReport report = + FrameCompleteReport.newBuilder().setFrame(info).setExitStatus(0).build(); + frameCompleteHandler.handleFrameCompleteReport(report); + + assertTrue(jobManager.isLayerComplete(layer)); + assertTrue(jobManager.isJobComplete(job)); + + jdbcTemplate.update("DELETE FROM job WHERE pk_job=?", job.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testEnabled() { + jdbcTemplate.update(DELETE_HISTORY); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + + launchAndDeleteJob(); + + assertEquals(Integer.valueOf(5), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(1), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + } + + @Test + @Transactional + @Rollback(true) + public void testDisabled() { + jdbcTemplate.update(DELETE_HISTORY); + jdbcTemplate.update(DISABLE_HISTORY); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + + launchAndDeleteJob(); + + assertEquals(Integer.valueOf(0), + jdbcTemplate.queryForObject("SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), + 
jdbcTemplate.queryForObject("SELECT COUNT(*) FROM frame_history", Integer.class)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java index f0c1ffe64..93426f756 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java @@ -41,58 +41,60 @@ @ContextConfiguration public class HostReportHandlerGpuTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - HostReportHandler hostReportHandler; - - @Resource - Dispatcher dispatcher; - - private static final String HOSTNAME = "beta"; - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { - return CoreDetail.newBuilder().setTotalCores(total).setIdleCores(idle).setBookedCores(booked) - .setLockedCores(locked).build(); - } - - private DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - private static RenderHost getRenderHost() { - return RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("test") - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") - .setNumGpus(64).setFreeGpuMem(1048576L * 2000).setTotalGpuMem(1048576L * 2048).build(); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReport() { - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = HostReport.newBuilder().setHost(getRenderHost()).setCoreInfo(cores).build(); - - hostReportHandler.handleHostReport(report, true); - DispatchHost host = getHost(); - assertEquals(host.lockState, LockState.OPEN); - assertEquals(host.memory, CueUtil.GB8 - 524288); - assertEquals(host.gpus, 64); - assertEquals(host.idleGpus, 64); - assertEquals(host.gpuMemory, 1048576L * 2048); - assertEquals(host.idleGpuMemory, 2147483648L); - } + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + HostReportHandler hostReportHandler; + + @Resource + Dispatcher dispatcher; + + private static final String HOSTNAME = "beta"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { + return CoreDetail.newBuilder().setTotalCores(total).setIdleCores(idle) + .setBookedCores(booked).setLockedCores(locked).build(); + } + + private DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + private static RenderHost getRenderHost() { + return RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setNumGpus(64).setFreeGpuMem(1048576L * 2000).setTotalGpuMem(1048576L * 2048) + .build(); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReport() { + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = + HostReport.newBuilder().setHost(getRenderHost()).setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, true); + DispatchHost host = getHost(); + assertEquals(host.lockState, LockState.OPEN); + assertEquals(host.memory, CueUtil.GB8 - 524288); + assertEquals(host.gpus, 64); + assertEquals(host.idleGpus, 64); + assertEquals(host.gpuMemory, 1048576L * 2048); + assertEquals(host.idleGpuMemory, 2147483648L); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java index b37eca5b8..3db3597fd 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java @@ -64,460 +64,470 @@ @ContextConfiguration public class HostReportHandlerTests extends TransactionalTest { - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - HostReportHandler hostReportHandler; - - @Resource - FrameCompleteHandler frameCompleteHandler; - - @Resource - Dispatcher dispatcher; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - CommentManager commentManager; - - private static final String HOSTNAME = "beta"; - private static final String NEW_HOSTNAME = "gamma"; - private String hostname; - private String hostname2; - private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = - "Host set to REPAIR for not having enough storage " - + "space on the temporary directory (mcp)"; - private static final String CUEBOT_COMMENT_USER = "cuebot"; - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - hostname = UUID.randomUUID().toString().substring(0, 8); - hostname2 = UUID.randomUUID().toString().substring(0, 8); - hostManager.createHost(getRenderHost(hostname), - adminManager.findAllocationDetail("spi", "general")); - hostManager.createHost(getRenderHost(hostname2), - adminManager.findAllocationDetail("spi", "general")); - } - - private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { - return CoreDetail.newBuilder().setTotalCores(total).setIdleCores(idle).setBookedCores(booked) - .setLockedCores(locked).build(); - } - - private DispatchHost getHost(String hostname) { - return hostManager.findDispatchHost(hostname); - } - - private static RenderHost.Builder getRenderHostBuilder(String hostname) { - return RenderHost.newBuilder().setName(hostname).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(16).setCoresPerProc(100).addTags("test") - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux").setNumGpus(0) - .setFreeGpuMem(0).setTotalGpuMem(0); - } - - private static RenderHost getRenderHost(String hostname) { - return getRenderHostBuilder(hostname).build(); - } - - private static RenderHost getNewRenderHost(String tags) { - return RenderHost.newBuilder().setName(NEW_HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) - .setTotalMcp(195430).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags(tags) - .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)).build(); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReport() throws InterruptedException { - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report1 = - HostReport.newBuilder().setHost(getRenderHost(hostname)).setCoreInfo(cores).build(); - HostReport report2 = - HostReport.newBuilder().setHost(getRenderHost(hostname2)).setCoreInfo(cores).build(); - HostReport report1_2 = HostReport.newBuilder().setHost(getRenderHost(hostname)) - .setCoreInfo(getCoreDetail(200, 200, 100, 0)).build(); - - hostReportHandler.handleHostReport(report1, false); - DispatchHost host = getHost(hostname); - assertEquals(LockState.OPEN, host.lockState); - assertEquals(HardwareState.UP, host.hardwareState); - hostReportHandler.handleHostReport(report1_2, false); - host = getHost(hostname); - assertEquals(HardwareState.UP, host.hardwareState); - - // Test Queue thread handling - ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - // Expecting results from a ThreadPool based class on JUnit is tricky - // A future test will be developed in the future to better address the behavior - // of - // this feature - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report2); // HOSTNAME2 - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1_2); // HOSTNAME - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithNewAllocation() { - FacilityInterface facility = adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); - assertEquals(facility.getName(), "spi"); - - AllocationEntity detail = new AllocationEntity(); - detail.name = "test"; - detail.tag = "test"; - adminManager.createAllocation(facility, detail); - detail = adminManager.findAllocationDetail("spi", "test"); - - boolean isBoot = true; - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = - HostReport.newBuilder().setHost(getNewRenderHost("test")).setCoreInfo(cores).build(); - - hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); - assertEquals(host.getAllocationId(), detail.id); - } - - @Test - @Transactional - @Rollback(true) - 
public void testHandleHostReportWithExistentAllocation() { - AllocationEntity alloc = - adminManager.getAllocationDetail("00000000-0000-0000-0000-000000000006"); - assertEquals(alloc.getName(), "spi.general"); - - boolean isBoot = true; - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = - HostReport.newBuilder().setHost(getNewRenderHost("general")).setCoreInfo(cores).build(); - - hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); - assertEquals(host.getAllocationId(), alloc.id); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithNonExistentTags() { - AllocationEntity alloc = - adminManager.getAllocationDetail("00000000-0000-0000-0000-000000000002"); - assertEquals(alloc.getName(), "lax.unassigned"); - - boolean isBoot = true; - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - HostReport report = - HostReport.newBuilder().setHost(getNewRenderHost("nonexistent")).setCoreInfo(cores).build(); - - hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); - assertEquals(host.getAllocationId(), alloc.id); - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithFullTemporaryDirectories() { - // Create CoreDetail - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - - /* - * Test 1: Precondition: - HardwareState=UP Action: - Receives a HostReport with less - * freeTempDir than the threshold (opencue.properties: min_available_temp_storage_percentage) - * Postcondition: - Host hardwareState changes to REPAIR - A comment is created with - * subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER - */ - // Create HostReport with totalMcp=4GB and freeMcp=128MB - HostReport report1 = HostReport.newBuilder() - .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.MB128).build()) - .setCoreInfo(cores).build(); - // Call handleHostReport() => Create the comment with - // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the - // host's hardwareState to REPAIR - hostReportHandler.handleHostReport(report1, false); - // Get host - DispatchHost host = getHost(hostname); - // Get list of comments by host, user, and subject - List comments = commentManager.getCommentsByHostUserAndSubject(host, - CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check if there is 1 comment - assertEquals(comments.size(), 1); - // Get host comment - CommentDetail comment = comments.get(0); - // Check if the comment has the user = CUEBOT_COMMENT_USER - assertEquals(comment.user, CUEBOT_COMMENT_USER); - // Check if the comment has the subject = SUBJECT_COMMENT_FULL_TEMP_DIR - assertEquals(comment.subject, SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check host lock state - assertEquals(LockState.OPEN, host.lockState); - // Check if host hardware state is REPAIR - assertEquals(HardwareState.REPAIR, host.hardwareState); - // Test Queue thread handling - ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1); // HOSTNAME - - /* - * Test 2: Precondition: - HardwareState=REPAIR - There is a comment for the host with - * subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER Action: Receives a - * HostReport with more freeTempDir than the threshold (opencue.properties: - * min_available_temp_storage_percentage) 
Postcondition: - Host hardwareState changes to UP - - * Comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER gets deleted - */ - // Set the host freeTempDir to the minimum size required = 1GB (1048576 KB) - HostReport report2 = HostReport.newBuilder() - .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()).setCoreInfo(cores) - .build(); - // Call handleHostReport() => Delete the comment with - // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the - // host's hardwareState to UP - hostReportHandler.handleHostReport(report2, false); - // Get host - host = getHost(hostname); - // Get list of comments by host, user, and subject - comments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, - SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check if there is no comment associated with the host - assertEquals(comments.size(), 0); - // Check host lock state - assertEquals(LockState.OPEN, host.lockState); - // Check if host hardware state is UP - assertEquals(HardwareState.UP, host.hardwareState); - // Test Queue thread handling - queue = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - hostReportHandler.queueHostReport(report1); // HOSTNAME - hostReportHandler.queueHostReport(report1); // HOSTNAME - } - - @Test - @Transactional - @Rollback(true) - public void testHandleHostReportWithHardwareStateRepairNotRelatedToFullTempDir() { - // Create CoreDetail - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - - /* - * Test if host.hardwareState == HardwareState.REPAIR (Not related to freeMcp < - * dispatcher.min_bookable_free_mcp_kb (opencue.properties)) - * - * - There is no comment with subject=SUBJECT_COMMENT_FULL_MCP_DIR and user=CUEBOT_COMMENT_USER - * associated with the host The host.hardwareState continue as HardwareState.REPAIR - */ - // Create HostReport - HostReport report = HostReport.newBuilder() - .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()).setCoreInfo(cores) - .build(); - // Get host - DispatchHost host = getHost(hostname); - // Host's HardwareState set to REPAIR - hostManager.setHostState(host, HardwareState.REPAIR); - host.hardwareState = HardwareState.REPAIR; - // Get list of comments by host, user, and subject - List hostComments = commentManager.getCommentsByHostUserAndSubject(host, - CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); - // Check if there is no comment - assertEquals(hostComments.size(), 0); - // There is no comment to delete - boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, - CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); - assertFalse(commentsDeleted); - // Call handleHostReport() - hostReportHandler.handleHostReport(report, false); - // Check host lock state - assertEquals(LockState.OPEN, host.lockState); - // Check if host hardware state is REPAIR - assertEquals(HardwareState.REPAIR, host.hardwareState); - // Test Queue thread handling - ThreadPoolExecutor queueThread = hostReportHandler.getReportQueue(); - // Make sure jobs flow normally without any nullpointer exception - hostReportHandler.queueHostReport(report); // HOSTNAME - hostReportHandler.queueHostReport(report); // HOSTNAME - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAndLlu() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - 
assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - CoreDetail cores = getCoreDetail(200, 200, 0, 0); - long now = System.currentTimeMillis(); - - RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()) - .setLluTime(now / 1000).setMaxRss(420000).build(); - HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)).setCoreInfo(cores) - .addFrames(info).build(); - - hostReportHandler.handleHostReport(report, false); - - FrameDetail frame = jobManager.getFrameDetail(proc.getFrameId()); - assertEquals(frame.dateLLU, new Timestamp(now / 1000 * 1000)); - assertEquals(420000, frame.maxRss); - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAggressionRss() { - jobLauncher.testMode = true; - dispatcher.setTestMode(true); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - // 1.6 = 1 + dispatcher.oom_frame_overboard_allowed_threshold - long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * 1.6); - - // Test rss overboard - RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()) - .setRss(memoryOverboard).setMaxRss(memoryOverboard).build(); - HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) - .setCoreInfo(getCoreDetail(200, 200, 0, 0)).addFrames(info).build(); - - long killCount = DispatchSupport.killedOffenderProcs.get(); - hostReportHandler.handleHostReport(report, false); - assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAggressionMaxRss() { - jobLauncher.testMode = true; - dispatcher.setTestMode(true); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); - - // 0.6 = dispatcher.oom_frame_overboard_allowed_threshold - long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * (1.0 + (2 * 0.6))); - - // Test rss>90% and maxRss overboard - RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) - .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()).setResourceId(proc.getProcId()) - .setRss((long) Math.ceil(0.95 * proc.memoryReserved)).setMaxRss(memoryOverboard).build(); - HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) - .setCoreInfo(getCoreDetail(200, 200, 0, 0)).addFrames(info).build(); - - long killCount = DispatchSupport.killedOffenderProcs.get(); - hostReportHandler.handleHostReport(report, false); - assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); - } - - @Test - @Transactional - @Rollback(true) - public void testMemoryAggressionMemoryWarning() { - jobLauncher.testMode = true; - dispatcher.setTestMode(true); - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_multiple_frames.xml")); - - DispatchHost host = getHost(hostname); - List procs = dispatcher.dispatchHost(host); - assertEquals(3, procs.size()); - VirtualProc proc1 = procs.get(0); - VirtualProc proc2 = 
procs.get(1); - VirtualProc proc3 = procs.get(2); - - // Ok - RunningFrameInfo info1 = RunningFrameInfo.newBuilder().setJobId(proc1.getJobId()) - .setLayerId(proc1.getLayerId()).setFrameId(proc1.getFrameId()) - .setResourceId(proc1.getProcId()).setUsedSwapMemory(CueUtil.MB512 - CueUtil.MB128) - .setVsize(CueUtil.GB2).setRss(CueUtil.GB2).setMaxRss(CueUtil.GB2).build(); - - // Overboard Rss - RunningFrameInfo info2 = RunningFrameInfo.newBuilder().setJobId(proc2.getJobId()) - .setLayerId(proc2.getLayerId()).setFrameId(proc2.getFrameId()) - .setResourceId(proc2.getProcId()).setUsedSwapMemory(CueUtil.MB512).setVsize(CueUtil.GB4) - .setRss(CueUtil.GB4).setMaxRss(CueUtil.GB4).build(); - - // Overboard Rss - long memoryUsedProc3 = CueUtil.GB8; - RunningFrameInfo info3 = RunningFrameInfo.newBuilder().setJobId(proc3.getJobId()) - .setLayerId(proc3.getLayerId()).setFrameId(proc3.getFrameId()) - .setResourceId(proc3.getProcId()).setUsedSwapMemory(CueUtil.MB512 * 2) - .setVsize(memoryUsedProc3).setRss(memoryUsedProc3).setMaxRss(memoryUsedProc3).build(); - - RenderHost hostAfterUpdate = getRenderHostBuilder(hostname).setFreeMem(0) - .setFreeSwap(CueUtil.GB2 - info1.getUsedSwapMemory() - info2.getUsedSwapMemory() - - info3.getUsedSwapMemory()) - .build(); - - HostReport report = - HostReport.newBuilder().setHost(hostAfterUpdate).setCoreInfo(getCoreDetail(200, 200, 0, 0)) - .addAllFrames(Arrays.asList(info1, info2, info3)).build(); - - // Get layer state before report gets sent - LayerDetail layerBeforeIncrease = jobManager.getLayerDetail(proc3.getLayerId()); - - // In this case, killing 2 frames should be enough to ge the machine to a safe - // state. Total Swap: 2GB, usage before kill: 1944MB, usage after kill: 348 - // (less than 20%) - long killCount = DispatchSupport.killedOffenderProcs.get(); - hostReportHandler.handleHostReport(report, false); - assertEquals(killCount + 2, DispatchSupport.killedOffenderProcs.get()); - - // Confirm the frame will be set to retry after it's completion has been - // processed - - RunningFrameInfo runningFrame = RunningFrameInfo.newBuilder().setFrameId(proc3.getFrameId()) - .setFrameName("frame_name").setLayerId(proc3.getLayerId()).setRss(memoryUsedProc3) - .setMaxRss(memoryUsedProc3).setResourceId(proc3.id).build(); - FrameCompleteReport completeReport = FrameCompleteReport.newBuilder().setHost(hostAfterUpdate) - .setFrame(runningFrame).setExitSignal(9).setRunTime(1).setExitStatus(1).build(); - - frameCompleteHandler.handleFrameCompleteReport(completeReport); - FrameDetail killedFrame = jobManager.getFrameDetail(proc3.getFrameId()); - LayerDetail layer = jobManager.getLayerDetail(proc3.getLayerId()); - assertEquals(FrameState.WAITING, killedFrame.state); - // Memory increases are processed in two different places. - // First: proc.reserved + 2GB - // Second: the maximum reported proc.maxRss - // The higher valuer beween First and Second wins. 
- // In this case, proc.maxRss - assertEquals(Math.max(memoryUsedProc3, layerBeforeIncrease.getMinimumMemory() + CueUtil.GB2), - layer.getMinimumMemory()); - } + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + HostReportHandler hostReportHandler; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + Dispatcher dispatcher; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + CommentManager commentManager; + + private static final String HOSTNAME = "beta"; + private static final String NEW_HOSTNAME = "gamma"; + private String hostname; + private String hostname2; + private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = + "Host set to REPAIR for not having enough storage " + + "space on the temporary directory (mcp)"; + private static final String CUEBOT_COMMENT_USER = "cuebot"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + hostname = UUID.randomUUID().toString().substring(0, 8); + hostname2 = UUID.randomUUID().toString().substring(0, 8); + hostManager.createHost(getRenderHost(hostname), + adminManager.findAllocationDetail("spi", "general")); + hostManager.createHost(getRenderHost(hostname2), + adminManager.findAllocationDetail("spi", "general")); + } + + private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { + return CoreDetail.newBuilder().setTotalCores(total).setIdleCores(idle) + .setBookedCores(booked).setLockedCores(locked).build(); + } + + private DispatchHost getHost(String hostname) { + return hostManager.findDispatchHost(hostname); + } + + private static RenderHost.Builder getRenderHostBuilder(String hostname) { + return RenderHost.newBuilder().setName(hostname).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(16).setCoresPerProc(100).addTags("test") + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .setNumGpus(0).setFreeGpuMem(0).setTotalGpuMem(0); + } + + private static RenderHost getRenderHost(String hostname) { + return getRenderHostBuilder(hostname).build(); + } + + private static RenderHost getNewRenderHost(String tags) { + return RenderHost.newBuilder().setName(NEW_HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(CueUtil.GB8).setFreeSwap(CueUtil.GB2).setLoad(0) + .setTotalMcp(195430).setTotalMem(CueUtil.GB8).setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).addTags(tags) + .setState(HardwareState.UP).setFacility("spi").putAttributes("SP_OS", "Linux") + .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) + .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)).build(); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReport() throws InterruptedException { + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report1 = + HostReport.newBuilder().setHost(getRenderHost(hostname)).setCoreInfo(cores).build(); + HostReport report2 = HostReport.newBuilder().setHost(getRenderHost(hostname2)) + .setCoreInfo(cores).build(); + HostReport report1_2 = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 100, 0)).build(); + + hostReportHandler.handleHostReport(report1, false); + DispatchHost host = getHost(hostname); + assertEquals(LockState.OPEN, host.lockState); + assertEquals(HardwareState.UP, host.hardwareState); + hostReportHandler.handleHostReport(report1_2, false); + host = getHost(hostname); + assertEquals(HardwareState.UP, host.hardwareState); + + // Test Queue thread handling + ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + // Expecting results from a ThreadPool based class on JUnit is tricky + // A future test will be developed in the future to better address the behavior + // of + // this feature + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report2); // HOSTNAME2 + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1_2); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithNewAllocation() { + FacilityInterface facility = + adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); + assertEquals(facility.getName(), "spi"); + + AllocationEntity detail = new AllocationEntity(); + detail.name = "test"; + detail.tag = "test"; + adminManager.createAllocation(facility, detail); + detail = adminManager.findAllocationDetail("spi", "test"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder().setHost(getNewRenderHost("test")) + .setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), detail.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithExistentAllocation() { + AllocationEntity alloc = + adminManager.getAllocationDetail("00000000-0000-0000-0000-000000000006"); + assertEquals(alloc.getName(), "spi.general"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder().setHost(getNewRenderHost("general")) + .setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), alloc.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithNonExistentTags() { + AllocationEntity alloc = + 
adminManager.getAllocationDetail("00000000-0000-0000-0000-000000000002"); + assertEquals(alloc.getName(), "lax.unassigned"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder().setHost(getNewRenderHost("nonexistent")) + .setCoreInfo(cores).build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), alloc.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithFullTemporaryDirectories() { + // Create CoreDetail + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + + /* + * Test 1: Precondition: - HardwareState=UP Action: - Receives a HostReport with less + * freeTempDir than the threshold (opencue.properties: + * min_available_temp_storage_percentage) Postcondition: - Host hardwareState changes to + * REPAIR - A comment is created with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and + * user=CUEBOT_COMMENT_USER + */ + // Create HostReport with totalMcp=4GB and freeMcp=128MB + HostReport report1 = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.MB128).build()) + .setCoreInfo(cores).build(); + // Call handleHostReport() => Create the comment with + // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the + // host's hardwareState to REPAIR + hostReportHandler.handleHostReport(report1, false); + // Get host + DispatchHost host = getHost(hostname); + // Get list of comments by host, user, and subject + List comments = commentManager.getCommentsByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is 1 comment + assertEquals(comments.size(), 1); + // Get host comment + CommentDetail comment = comments.get(0); + // Check if the comment has the user = CUEBOT_COMMENT_USER + assertEquals(comment.user, CUEBOT_COMMENT_USER); + // Check if the comment has the subject = SUBJECT_COMMENT_FULL_TEMP_DIR + assertEquals(comment.subject, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is REPAIR + assertEquals(HardwareState.REPAIR, host.hardwareState); + // Test Queue thread handling + ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + + /* + * Test 2: Precondition: - HardwareState=REPAIR - There is a comment for the host with + * subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER Action: Receives a + * HostReport with more freeTempDir than the threshold (opencue.properties: + * min_available_temp_storage_percentage) Postcondition: - Host hardwareState changes to UP + * - Comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER gets + * deleted + */ + // Set the host freeTempDir to the minimum size required = 1GB (1048576 KB) + HostReport report2 = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()) + .setCoreInfo(cores).build(); + // Call handleHostReport() => Delete the comment with + // subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the + // host's hardwareState to UP + hostReportHandler.handleHostReport(report2, false); + // Get host + host = getHost(hostname); + // Get list of comments by host, user, and subject + comments = 
commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, + SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is no comment associated with the host + assertEquals(comments.size(), 0); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is UP + assertEquals(HardwareState.UP, host.hardwareState); + // Test Queue thread handling + queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithHardwareStateRepairNotRelatedToFullTempDir() { + // Create CoreDetail + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + + /* + * Test if host.hardwareState == HardwareState.REPAIR (Not related to freeMcp < + * dispatcher.min_bookable_free_mcp_kb (opencue.properties)) + * + * - There is no comment with subject=SUBJECT_COMMENT_FULL_MCP_DIR and + * user=CUEBOT_COMMENT_USER associated with the host The host.hardwareState continue as + * HardwareState.REPAIR + */ + // Create HostReport + HostReport report = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()) + .setCoreInfo(cores).build(); + // Get host + DispatchHost host = getHost(hostname); + // Host's HardwareState set to REPAIR + hostManager.setHostState(host, HardwareState.REPAIR); + host.hardwareState = HardwareState.REPAIR; + // Get list of comments by host, user, and subject + List hostComments = commentManager.getCommentsByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is no comment + assertEquals(hostComments.size(), 0); + // There is no comment to delete + boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + assertFalse(commentsDeleted); + // Call handleHostReport() + hostReportHandler.handleHostReport(report, false); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is REPAIR + assertEquals(HardwareState.REPAIR, host.hardwareState); + // Test Queue thread handling + ThreadPoolExecutor queueThread = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report); // HOSTNAME + hostReportHandler.queueHostReport(report); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAndLlu() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + long now = System.currentTimeMillis(); + + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).setLluTime(now / 1000).setMaxRss(420000).build(); + HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(cores).addFrames(info).build(); + + hostReportHandler.handleHostReport(report, false); + + FrameDetail frame = jobManager.getFrameDetail(proc.getFrameId()); + assertEquals(frame.dateLLU, new Timestamp(now / 1000 
* 1000)); + assertEquals(420000, frame.maxRss); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionRss() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + // 1.6 = 1 + dispatcher.oom_frame_overboard_allowed_threshold + long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * 1.6); + + // Test rss overboard + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()).setRss(memoryOverboard).setMaxRss(memoryOverboard) + .build(); + HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)).addFrames(info).build(); + + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionMaxRss() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + // 0.6 = dispatcher.oom_frame_overboard_allowed_threshold + long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * (1.0 + (2 * 0.6))); + + // Test rss>90% and maxRss overboard + RunningFrameInfo info = RunningFrameInfo.newBuilder().setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()).setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .setRss((long) Math.ceil(0.95 * proc.memoryReserved)).setMaxRss(memoryOverboard) + .build(); + HostReport report = HostReport.newBuilder().setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)).addFrames(info).build(); + + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionMemoryWarning() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_multiple_frames.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(3, procs.size()); + VirtualProc proc1 = procs.get(0); + VirtualProc proc2 = procs.get(1); + VirtualProc proc3 = procs.get(2); + + // Ok + RunningFrameInfo info1 = RunningFrameInfo.newBuilder().setJobId(proc1.getJobId()) + .setLayerId(proc1.getLayerId()).setFrameId(proc1.getFrameId()) + .setResourceId(proc1.getProcId()).setUsedSwapMemory(CueUtil.MB512 - CueUtil.MB128) + .setVsize(CueUtil.GB2).setRss(CueUtil.GB2).setMaxRss(CueUtil.GB2).build(); + + // Overboard Rss + RunningFrameInfo info2 = RunningFrameInfo.newBuilder().setJobId(proc2.getJobId()) + .setLayerId(proc2.getLayerId()).setFrameId(proc2.getFrameId()) + .setResourceId(proc2.getProcId()).setUsedSwapMemory(CueUtil.MB512) + .setVsize(CueUtil.GB4).setRss(CueUtil.GB4).setMaxRss(CueUtil.GB4).build(); + + // 
Overboard Rss + long memoryUsedProc3 = CueUtil.GB8; + RunningFrameInfo info3 = RunningFrameInfo.newBuilder().setJobId(proc3.getJobId()) + .setLayerId(proc3.getLayerId()).setFrameId(proc3.getFrameId()) + .setResourceId(proc3.getProcId()).setUsedSwapMemory(CueUtil.MB512 * 2) + .setVsize(memoryUsedProc3).setRss(memoryUsedProc3).setMaxRss(memoryUsedProc3) + .build(); + + RenderHost hostAfterUpdate = getRenderHostBuilder(hostname).setFreeMem(0) + .setFreeSwap(CueUtil.GB2 - info1.getUsedSwapMemory() - info2.getUsedSwapMemory() + - info3.getUsedSwapMemory()) + .build(); + + HostReport report = HostReport.newBuilder().setHost(hostAfterUpdate) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)) + .addAllFrames(Arrays.asList(info1, info2, info3)).build(); + + // Get layer state before report gets sent + LayerDetail layerBeforeIncrease = jobManager.getLayerDetail(proc3.getLayerId()); + + // In this case, killing 2 frames should be enough to get the machine to a safe + // state. Total Swap: 2GB, usage before kill: 1944MB, usage after kill: 348MB + // (less than 20%) + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 2, DispatchSupport.killedOffenderProcs.get()); + + // Confirm the frame will be set to retry after its completion has been + // processed + + RunningFrameInfo runningFrame = RunningFrameInfo.newBuilder().setFrameId(proc3.getFrameId()) + .setFrameName("frame_name").setLayerId(proc3.getLayerId()).setRss(memoryUsedProc3) + .setMaxRss(memoryUsedProc3).setResourceId(proc3.id).build(); + FrameCompleteReport completeReport = + FrameCompleteReport.newBuilder().setHost(hostAfterUpdate).setFrame(runningFrame) + .setExitSignal(9).setRunTime(1).setExitStatus(1).build(); + + frameCompleteHandler.handleFrameCompleteReport(completeReport); + FrameDetail killedFrame = jobManager.getFrameDetail(proc3.getFrameId()); + LayerDetail layer = jobManager.getLayerDetail(proc3.getLayerId()); + assertEquals(FrameState.WAITING, killedFrame.state); + // Memory increases are processed in two different places. + // First: proc.reserved + 2GB + // Second: the maximum reported proc.maxRss + // The higher value between First and Second wins.
+ // In this case, proc.maxRss + assertEquals( + Math.max(memoryUsedProc3, layerBeforeIncrease.getMinimumMemory() + CueUtil.GB2), + layer.getMinimumMemory()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java index 6d142ad8c..ccef855c8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java @@ -49,287 +49,288 @@ @ContextConfiguration public class LocalDispatcherTests extends TransactionalTest { - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - Dispatcher localDispatcher; - - @Resource - BookingManager bookingManager; - - private static final String HOSTNAME = "beta"; - - private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - - private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - localDispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(0) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(2).setCoresPerProc(400).setState(HardwareState.UP).setFacility("spi") - .addTags("test").putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } - - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } - - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostAutoDetectJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setThreads(1); - lja.setMaxMemory(CueUtil.GB8); - lja.setMaxCoreUnits(200); - bookingManager.createLocalHostAssignment(host, job, lja); - - List procs = localDispatcher.dispatchHost(host); - - // Should have 2 procs. - assertEquals(2, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - /* - * Check to ensure the procs are marked as local. - */ - assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check to ensure the right job was booked. 
- */ - assertTrue(procs.stream().allMatch(proc -> proc.jobId.equals(job.getId()))); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostAutoDetectLayer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - - LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, layer, lba); - - List procs = localDispatcher.dispatchHost(host); - - // Should have 2 procs. - assertEquals(3, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - /* - * Check that they are all marked local. - */ - assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check that they are all frame the same layer. - */ - assertTrue(procs.stream().allMatch(proc -> proc.layerId.equals(layer.getId()))); - - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostAutoDetectFrame() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, frame, lba); - - List procs = localDispatcher.dispatchHost(host); - - /* - * Should always be 1 or 0, in this case it should be 1. - */ - assertEquals(1, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - /* - * Check the frame id. - */ - assertEquals(frame.getFrameId(), procs.get(0).frameId); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalJob() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, job, lba); - - List procs = localDispatcher.dispatchHost(host, job); - - // Should have 2 procs. - assertEquals(2, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - // Check that they are local. - assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check to ensure the right job was booked. - */ - assertTrue(procs.stream().allMatch(proc -> proc.jobId.equals(job.getId()))); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalLayer() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - - LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, layer, lba); - - List procs = localDispatcher.dispatchHost(host, layer); - - // Should have 2 procs. - assertEquals(3, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - /* - * Check that they are all marked local. - */ - assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); - - /* - * Check that they are all frame the same layer. 
- */ - assertTrue(procs.stream().allMatch(proc -> proc.layerId.equals(layer.getId()))); - - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalFrame() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, frame, lba); - - List procs = localDispatcher.dispatchHost(host, frame); - - /* - * Should always be 1 or 0 procs, in this case 1. - */ - assertEquals(1, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - /* - * Check the frame id. - */ - assertEquals(frame.getFrameId(), procs.get(0).frameId); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalFrameTwice() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, frame, lba); - - List procs = localDispatcher.dispatchHost(host, frame); - - /* - * Should always be 1 or 0 procs, in this case 1. - */ - assertEquals(1, procs.size()); - - /* - * Dispatch again. - */ - procs = localDispatcher.dispatchHost(host, frame); - - /* - * Should always be 1 or 0 procs, in this case 0. - */ - assertEquals(0, procs.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testDispatchHostToLocalJobDeficit() { - DispatchHost host = getHost(); - JobDetail job = getJob(); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 0, 0); - bookingManager.createLocalHostAssignment(host, job, lba); - - List procs = localDispatcher.dispatchHost(host, job); - - // Should have 1 proc. - assertEquals(1, procs.size()); - assertTrue(bookingManager.hasActiveLocalFrames(host)); - - // Check that they are local. - assertTrue(procs.get(0).isLocalDispatch); - /* - * Check to ensure the right job was booked. - */ - assertEquals(job.getJobId(), procs.get(0).jobId); - - /* - * Now, lower our min cores to create a deficit. - */ - assertFalse(bookingManager.hasResourceDeficit(host)); - bookingManager.setMaxResources(lba, 700, 0, 0, 0); - assertTrue(bookingManager.hasResourceDeficit(host)); - } + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + Dispatcher localDispatcher; + + @Resource + BookingManager bookingManager; + + private static final String HOSTNAME = "beta"; + + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } + + @Before + public void setTestMode() { + localDispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(0) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(400) + .setState(HardwareState.UP).setFacility("spi").addTags("test") + .putAttributes("SP_OS", "Linux").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } + + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } + + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostAutoDetectJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setThreads(1); + lja.setMaxMemory(CueUtil.GB8); + lja.setMaxCoreUnits(200); + bookingManager.createLocalHostAssignment(host, job, lja); + + List procs = localDispatcher.dispatchHost(host); + + // Should have 2 procs. + assertEquals(2, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + /* + * Check to ensure the procs are marked as local. + */ + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check to ensure the right job was booked. + */ + assertTrue(procs.stream().allMatch(proc -> proc.jobId.equals(job.getId()))); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostAutoDetectLayer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + + LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, layer, lba); + + List procs = localDispatcher.dispatchHost(host); + + // Should have 2 procs. + assertEquals(3, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + /* + * Check that they are all marked local. + */ + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check that they are all frame the same layer. + */ + assertTrue(procs.stream().allMatch(proc -> proc.layerId.equals(layer.getId()))); + + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostAutoDetectFrame() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, frame, lba); + + List procs = localDispatcher.dispatchHost(host); + + /* + * Should always be 1 or 0, in this case it should be 1. + */ + assertEquals(1, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + /* + * Check the frame id. + */ + assertEquals(frame.getFrameId(), procs.get(0).frameId); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalJob() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, job, lba); + + List procs = localDispatcher.dispatchHost(host, job); + + // Should have 2 procs. 
+ assertEquals(2, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + // Check that they are local. + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check to ensure the right job was booked. + */ + assertTrue(procs.stream().allMatch(proc -> proc.jobId.equals(job.getId()))); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalLayer() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + + LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, layer, lba); + + List procs = localDispatcher.dispatchHost(host, layer); + + // Should have 2 procs. + assertEquals(3, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + /* + * Check that they are all marked local. + */ + assertTrue(procs.stream().allMatch(proc -> proc.isLocalDispatch)); + + /* + * Check that they are all frame the same layer. + */ + assertTrue(procs.stream().allMatch(proc -> proc.layerId.equals(layer.getId()))); + + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalFrame() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, frame, lba); + + List procs = localDispatcher.dispatchHost(host, frame); + + /* + * Should always be 1 or 0 procs, in this case 1. + */ + assertEquals(1, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + /* + * Check the frame id. + */ + assertEquals(frame.getFrameId(), procs.get(0).frameId); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalFrameTwice() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, frame, lba); + + List procs = localDispatcher.dispatchHost(host, frame); + + /* + * Should always be 1 or 0 procs, in this case 1. + */ + assertEquals(1, procs.size()); + + /* + * Dispatch again. + */ + procs = localDispatcher.dispatchHost(host, frame); + + /* + * Should always be 1 or 0 procs, in this case 0. + */ + assertEquals(0, procs.size()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHostToLocalJobDeficit() { + DispatchHost host = getHost(); + JobDetail job = getJob(); + + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 0, 0); + bookingManager.createLocalHostAssignment(host, job, lba); + + List procs = localDispatcher.dispatchHost(host, job); + + // Should have 1 proc. + assertEquals(1, procs.size()); + assertTrue(bookingManager.hasActiveLocalFrames(host)); + + // Check that they are local. + assertTrue(procs.get(0).isLocalDispatch); + /* + * Check to ensure the right job was booked. + */ + assertEquals(job.getJobId(), procs.get(0).jobId); + + /* + * Now, lower our min cores to create a deficit. 
+ */ + assertFalse(bookingManager.hasResourceDeficit(host)); + bookingManager.setMaxResources(lba, 700, 0, 0, 0); + assertTrue(bookingManager.hasResourceDeficit(host)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java index d58b589bb..dc419ced3 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java @@ -73,345 +73,346 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class RedirectManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - RedirectManager redirectManager; + @Resource + RedirectManager redirectManager; - @Resource - RedirectService redirectService; + @Resource + RedirectService redirectService; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Resource - ProcDao procDao; + @Resource + ProcDao procDao; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - Whiteboard whiteboard; + @Resource + Whiteboard whiteboard; - @Resource - ProcSearchFactory procSearchFactory; + @Resource + ProcSearchFactory procSearchFactory; - private static final String HOSTNAME = "beta"; + private static final String HOSTNAME = "beta"; - private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(1).setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") - .addTags("test").putAttributes("SP_OS", "Linux").build(); - - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } - - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(100) + .setState(HardwareState.UP).setFacility("spi").addTags("test") + .putAttributes("SP_OS", "Linux").build(); + + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } - @Test - @Transactional - @Rollback(true) - public void testAddJobRedirectByCriteria() { + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } - JobDetail job = getJob(); - DispatchHost host = getHost(); + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } - /* Setup a proc search */ - ProcSearchInterface search = procSearchFactory.create(); - ProcSearchCriteria criteria = search.getCriteria(); - search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); + @Test + @Transactional + @Rollback(true) + public void testAddJobRedirectByCriteria() { - List jobs = new ArrayList(1); - jobs.add(jobManager.findJob(TARGET_JOB)); + JobDetail job = getJob(); + DispatchHost host = getHost(); - /* Now redirect this proc to the other job */ - redirectManager.addRedirect(search.getCriteria(), jobs, false, new Source()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - /* Test that the redirect was added properly. */ - assertTrue(redirectManager.hasRedirect(procs.get(0))); + /* Setup a proc search */ + ProcSearchInterface search = procSearchFactory.create(); + ProcSearchCriteria criteria = search.getCriteria(); + search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); - /* Check to ensure the redirect target was set. 
*/ - assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + List jobs = new ArrayList(1); + jobs.add(jobManager.findJob(TARGET_JOB)); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); - } + /* Now redirect this proc to the other job */ + redirectManager.addRedirect(search.getCriteria(), jobs, false, new Source()); - @Test - @Transactional - @Rollback(true) - public void testAddGroupRedirectByCriteria() { + /* Test that the redirect was added properly. */ + assertTrue(redirectManager.hasRedirect(procs.get(0))); - JobDetail job = getJob(); - DispatchHost host = getHost(); + /* Check to ensure the redirect target was set. */ + assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - // Double check there is a proc. - procDao.getVirtualProc(proc.getId()); + @Test + @Transactional + @Rollback(true) + public void testAddGroupRedirectByCriteria() { - /* Setup a proc search */ - ProcSearchInterface search = procSearchFactory.create(); - ProcSearchCriteria criteria = search.getCriteria(); - search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); + JobDetail job = getJob(); + DispatchHost host = getHost(); - GroupInterface root = groupManager.getRootGroupDetail(job); - GroupDetail group = new GroupDetail(); - group.name = "Foo"; - group.showId = root.getShowId(); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - groupManager.createGroup(group, root); + // Double check there is a proc. + procDao.getVirtualProc(proc.getId()); - /* Now redirect this proc to the other job */ - redirectManager.addRedirect(search.getCriteria(), group, false, new Source()); + /* Setup a proc search */ + ProcSearchInterface search = procSearchFactory.create(); + ProcSearchCriteria criteria = search.getCriteria(); + search.setCriteria(criteria.toBuilder().addJobs(job.getName()).build()); - /* Test that the redirect was added properly. */ - assertTrue(redirectManager.hasRedirect(procs.get(0))); + GroupInterface root = groupManager.getRootGroupDetail(job); + GroupDetail group = new GroupDetail(); + group.name = "Foo"; + group.showId = root.getShowId(); - /* Check to ensure the redirect target was set. */ - assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + groupManager.createGroup(group, root); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); - } + /* Now redirect this proc to the other job */ + redirectManager.addRedirect(search.getCriteria(), group, false, new Source()); - @Test - @Transactional - @Rollback(true) - public void testAddJobRedirect() { + /* Test that the redirect was added properly. */ + assertTrue(redirectManager.hasRedirect(procs.get(0))); - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + /* Check to ensure the redirect target was set. 
*/ + assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + @Test + @Transactional + @Rollback(true) + public void testAddJobRedirect() { - assertTrue(redirectManager.addRedirect(proc, target, false, new Source())); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); - } + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - @Test - @Transactional - @Rollback(true) - public void testAddGroupRedirect() { + assertTrue(redirectManager.addRedirect(proc, target, false, new Source())); - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - /* Find the root group and move our target job there. */ - GroupDetail group = groupManager.getRootGroupDetail(job); - groupManager.reparentJob(target, group, new Inherit[] {}); + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - assertEquals(group.getId(), groupManager.getGroupDetail(target).getId()); + @Test + @Transactional + @Rollback(true) + public void testAddGroupRedirect() { - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + /* Find the root group and move our target job there. 
*/ + GroupDetail group = groupManager.getRootGroupDetail(job); + groupManager.reparentJob(target, group, new Inherit[] {}); - assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); + assertEquals(group.getId(), groupManager.getGroupDetail(target).getId()); - assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - redirectManager.removeRedirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); - assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); - } + assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); - @Test - @Transactional - @Rollback(true) - public void testJobRedirect() { + assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + redirectManager.removeRedirect(proc); + assertFalse(redirectManager.hasRedirect(proc)); + assertThat(whiteboard.getProcs(search).getProcs(0).getRedirectTarget(), is(emptyString())); + } - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + @Test + @Transactional + @Rollback(true) + public void testJobRedirect() { - assertTrue(redirectManager.addRedirect(proc, target, false, new Source())); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertTrue(redirectManager.redirect(proc)); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - assertEquals(Convert.coreUnitsToCores(100), - whiteboard.getJob(target.getJobId()).getJobStats().getReservedCores(), 0); - } + assertTrue(redirectManager.addRedirect(proc, target, false, new Source())); - @Test - @Transactional - @Rollback(true) - public void testGroupRedirect() { + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(TARGET_JOB, whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - JobDetail job = getJob(); - JobDetail target = getTargetJob(); - DispatchHost host = getHost(); + assertTrue(redirectManager.redirect(proc)); - /* Find the root group and move our target job there. 
*/ - GroupDetail group = groupManager.getRootGroupDetail(job); - groupManager.reparentJob(target, group, new Inherit[] {}); + assertEquals(Convert.coreUnitsToCores(100), + whiteboard.getJob(target.getJobId()).getJobStats().getReservedCores(), 0); + } - assertEquals(group.getId(), groupManager.getGroupDetail(target).getId()); + @Test + @Transactional + @Rollback(true) + public void testGroupRedirect() { - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + JobDetail job = getJob(); + JobDetail target = getTargetJob(); + DispatchHost host = getHost(); - ProcSearchInterface search = procSearchFactory.create(); - search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); + /* Find the root group and move our target job there. */ + GroupDetail group = groupManager.getRootGroupDetail(job); + groupManager.reparentJob(target, group, new Inherit[] {}); - assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); + assertEquals(group.getId(), groupManager.getGroupDetail(target).getId()); - assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - assertTrue(redirectManager.hasRedirect(proc)); - assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); + ProcSearchInterface search = procSearchFactory.create(); + search.setCriteria(ProcSearchCriteria.newBuilder().addJobs(job.getName()).build()); - redirectManager.redirect(proc); + assertEquals(group.getGroupId(), jobDao.getJobDetail(target.getJobId()).groupId); - assertEquals(Convert.coreUnitsToCores(100), - whiteboard.getGroup(group.getGroupId()).getGroupStats().getReservedCores(), 0); - } + assertTrue(redirectManager.addRedirect(proc, group, false, new Source())); - @Test - @Transactional - @Rollback(true) - public void testNonExistentRedirect() { - JobDetail job = getJob(); - DispatchHost host = getHost(); + assertTrue(redirectManager.hasRedirect(proc)); + assertEquals(group.getName(), whiteboard.getProcs(search).getProcs(0).getRedirectTarget()); - List procs = dispatcher.dispatchHost(host, job); - assertEquals(1, procs.size()); - VirtualProc proc = procs.get(0); + redirectManager.redirect(proc); - assertFalse(redirectManager.hasRedirect(proc)); + assertEquals(Convert.coreUnitsToCores(100), + whiteboard.getGroup(group.getGroupId()).getGroupStats().getReservedCores(), 0); + } - // This should not throw any exception. - assertFalse(redirectManager.redirect(proc)); - } + @Test + @Transactional + @Rollback(true) + public void testNonExistentRedirect() { + JobDetail job = getJob(); + DispatchHost host = getHost(); - /** - * Test that parallel attempts to save a redirect with the same key succeed without throwing an - * exception. - */ - @Test - @Transactional - @Rollback(true) - public void testParallelPuts() { - final int N = 20; + List procs = dispatcher.dispatchHost(host, job); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); - CountDownLatch startSignal = new CountDownLatch(1); - CountDownLatch stopSignal = new CountDownLatch(N); + assertFalse(redirectManager.hasRedirect(proc)); - final String redirect_key = "test"; + // This should not throw any exception. 
+ assertFalse(redirectManager.redirect(proc)); + } - Redirect redirect = new Redirect(RedirectType.JOB_REDIRECT, "foo", "bar"); + /** + * Test that parallel attempts to save a redirect with the same key succeed without throwing an + * exception. + */ + @Test + @Transactional + @Rollback(true) + public void testParallelPuts() { + final int N = 20; + + CountDownLatch startSignal = new CountDownLatch(1); + CountDownLatch stopSignal = new CountDownLatch(N); + + final String redirect_key = "test"; + + Redirect redirect = new Redirect(RedirectType.JOB_REDIRECT, "foo", "bar"); + + for (int i = 0; i < N; i++) { + new Thread(new Runnable() { + @Override + public void run() { + try { + try { + startSignal.await(); + } catch (InterruptedException e) { + throw new RuntimeException("Failed to wait for start signal", e); + } + + // This should not throw anything... + redirectService.put(redirect_key, redirect); + } finally { + stopSignal.countDown(); + } + } + }).start(); + } - for (int i = 0; i < N; i++) { - new Thread(new Runnable() { - @Override - public void run() { - try { + // Start all the threads at roughly the same time. + try { + startSignal.countDown(); try { - startSignal.await(); + stopSignal.await(); } catch (InterruptedException e) { - throw new RuntimeException("Failed to wait for start signal", e); + throw new RuntimeException("Failed to wait for stop signal", e); } - - // This should not throw anything... - redirectService.put(redirect_key, redirect); - } finally { - stopSignal.countDown(); - } + } finally { + // Clean up after test. + redirectService.remove(redirect_key); } - }).start(); - } - - // Start all the threads at roughly the same time. - try { - startSignal.countDown(); - try { - stopSignal.await(); - } catch (InterruptedException e) { - throw new RuntimeException("Failed to wait for stop signal", e); - } - } finally { - // Clean up after test. 
- redirectService.remove(redirect_key); } - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java index 588e11506..5488038c8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java @@ -47,82 +47,83 @@ @ContextConfiguration public class StrandedCoreTests extends TransactionalTest { - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - GroupManager groupManager; + @Resource + GroupManager groupManager; - @Resource - Dispatcher dispatcher; + @Resource + Dispatcher dispatcher; - @Resource - DispatchSupport dispatchSupport; + @Resource + DispatchSupport dispatchSupport; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - private static final String HOSTNAME = "beta"; + private static final String HOSTNAME = "beta"; - private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String JOBNAME = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String TARGET_JOB = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - @Before - public void launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + } - @Before - public void setTestMode() { - dispatcher.setTestMode(true); - } + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(2).setCoresPerProc(200).setState(HardwareState.UP).setFacility("spi") - .addTags("test").putAttributes("SP_OS", "Linux").build(); + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(200) + .setState(HardwareState.UP).setFacility("spi").addTags("test") + .putAttributes("SP_OS", "Linux").build(); - hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); - } + hostManager.createHost(host, adminManager.findAllocationDetail("spi", "general")); + } - public JobDetail getJob() { - return jobManager.findJobDetail(JOBNAME); - } + public JobDetail getJob() { + return jobManager.findJobDetail(JOBNAME); + } - public JobDetail getTargetJob() { - return jobManager.findJobDetail(TARGET_JOB); - } + public JobDetail getTargetJob() { + return jobManager.findJobDetail(TARGET_JOB); + } - public DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); - } + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } - @Test - @Transactional - @Rollback(true) - public void dispatchStrandedCores() { - DispatchHost host = getHost(); - JobDetail job = getJob(); + @Test + @Transactional + @Rollback(true) + public void dispatchStrandedCores() { + DispatchHost host = getHost(); + JobDetail job = getJob(); - dispatchSupport.strandCores(host, 200); - List procs = dispatcher.dispatchHost(host, job); - assertTrue("No procs were booked by the dispatcher.", procs.size() > 0); - assertEquals(400, procs.get(0).coresReserved); - } + dispatchSupport.strandCores(host, 200); + List procs = dispatcher.dispatchHost(host, job); + assertTrue("No procs were booked by the dispatcher.", procs.size() > 0); + assertEquals(400, procs.get(0).coresReserved); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java index a51fec72e..c481cfa75 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java @@ -43,63 +43,64 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class TestBookingQueue extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - HostDao hostDao; - - @Resource - Dispatcher dispatcher; - - @Resource - HostManager hostManager; - - @Resource - BookingQueue bookingQueue; - - @Autowired - Environment env; - - private static final String HOSTNAME = "beta"; - - @Before - public void create() { - RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. 
- .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960).setNimbyEnabled(false) - .setNumProcs(1).setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") - .addAllTags(ImmutableList.of("mcore", "4core", "8g")).setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - - hostManager.createHost(host); - } - - @Test - @Transactional - @Rollback(true) - public void testBookingQueue() { - - int healthThreshold = 10; - int minUnhealthyPeriodMin = 3; - int queueCapacity = 2000; - int corePoolSize = 10; - int maxPoolSize = 14; - - DispatchHost host1 = hostDao.findDispatchHost(HOSTNAME); - host1.idleCores = 500; - DispatchHost host2 = hostDao.findDispatchHost(HOSTNAME); - DispatchHost host3 = hostDao.findDispatchHost(HOSTNAME); - BookingQueue queue = new BookingQueue(healthThreshold, minUnhealthyPeriodMin, queueCapacity, - corePoolSize, maxPoolSize); - bookingQueue.execute(new DispatchBookHost(host2, dispatcher, env)); - bookingQueue.execute(new DispatchBookHost(host3, dispatcher, env)); - bookingQueue.execute(new DispatchBookHost(host1, dispatcher, env)); - try { - Thread.sleep(10000); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); + @Resource + HostDao hostDao; + + @Resource + Dispatcher dispatcher; + + @Resource + HostManager hostManager; + + @Resource + BookingQueue bookingQueue; + + @Autowired + Environment env; + + private static final String HOSTNAME = "beta"; + + @Before + public void create() { + RenderHost host = RenderHost.newBuilder().setName(HOSTNAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem(8173264).setTotalSwap(20960) + .setNimbyEnabled(false).setNumProcs(1).setCoresPerProc(100) + .setState(HardwareState.UP).setFacility("spi") + .addAllTags(ImmutableList.of("mcore", "4core", "8g")) + .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); + + hostManager.createHost(host); } - } + @Test + @Transactional + @Rollback(true) + public void testBookingQueue() { + + int healthThreshold = 10; + int minUnhealthyPeriodMin = 3; + int queueCapacity = 2000; + int corePoolSize = 10; + int maxPoolSize = 14; + + DispatchHost host1 = hostDao.findDispatchHost(HOSTNAME); + host1.idleCores = 500; + DispatchHost host2 = hostDao.findDispatchHost(HOSTNAME); + DispatchHost host3 = hostDao.findDispatchHost(HOSTNAME); + BookingQueue queue = new BookingQueue(healthThreshold, minUnhealthyPeriodMin, queueCapacity, + corePoolSize, maxPoolSize); + bookingQueue.execute(new DispatchBookHost(host2, dispatcher, env)); + bookingQueue.execute(new DispatchBookHost(host3, dispatcher, env)); + bookingQueue.execute(new DispatchBookHost(host1, dispatcher, env)); + try { + Thread.sleep(10000); + } catch (InterruptedException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java index 6db8d7119..bd1c754a0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java @@ -19,12 +19,12 @@ public class FakeStreamObserver implements StreamObserver { - 
@Override - public void onNext(T value) {} + @Override + public void onNext(T value) {} - @Override - public void onError(Throwable t) {} + @Override + public void onError(Throwable t) {} - @Override - public void onCompleted() {} + @Override + public void onCompleted() {} } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java index 85d9eeb64..4f2cb1bb8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java @@ -48,84 +48,84 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ManageAllocationTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - AllocationDao allocationDao; + @Resource + AllocationDao allocationDao; - @Resource - FacilityDao facilityDao; + @Resource + FacilityDao facilityDao; - @Resource - ManageAllocation manageAllocation; + @Resource + ManageAllocation manageAllocation; - @Test - @Transactional - @Rollback(true) - public void testCreate() { - Facility facility = - Facility.newBuilder().setName(facilityDao.getFacility("spi").getName()).build(); + @Test + @Transactional + @Rollback(true) + public void testCreate() { + Facility facility = + Facility.newBuilder().setName(facilityDao.getFacility("spi").getName()).build(); - // Use . name - AllocCreateRequest request = AllocCreateRequest.newBuilder().setName("spi.test_tag") - .setTag("test_tag").setFacility(facility).build(); + // Use . name + AllocCreateRequest request = AllocCreateRequest.newBuilder().setName("spi.test_tag") + .setTag("test_tag").setFacility(facility).build(); - FakeStreamObserver responseObserver = - new FakeStreamObserver(); - manageAllocation.create(request, responseObserver); + FakeStreamObserver responseObserver = + new FakeStreamObserver(); + manageAllocation.create(request, responseObserver); - allocationDao.findAllocationEntity("spi", "test_tag"); - } + allocationDao.findAllocationEntity("spi", "test_tag"); + } + + @Test + @Transactional + @Rollback(true) + public void testDelete() { + Facility facility = + Facility.newBuilder().setName(facilityDao.getFacility("spi").getName()).build(); + + // Non . name should work too. + AllocCreateRequest createRequest = AllocCreateRequest.newBuilder().setName("test_tag") + .setTag("test_tag").setFacility(facility).build(); - @Test - @Transactional - @Rollback(true) - public void testDelete() { - Facility facility = - Facility.newBuilder().setName(facilityDao.getFacility("spi").getName()).build(); + FakeStreamObserver createResponseObserver = + new FakeStreamObserver(); + manageAllocation.create(createRequest, createResponseObserver); - // Non . name should work too. 
- AllocCreateRequest createRequest = AllocCreateRequest.newBuilder().setName("test_tag") - .setTag("test_tag").setFacility(facility).build(); + Allocation allocation = Allocation.newBuilder().setName("spi.test_tag").setTag("test_tag") + .setFacility("spi").build(); - FakeStreamObserver createResponseObserver = - new FakeStreamObserver(); - manageAllocation.create(createRequest, createResponseObserver); + AllocDeleteRequest deleteRequest = + AllocDeleteRequest.newBuilder().setAllocation(allocation).build(); - Allocation allocation = Allocation.newBuilder().setName("spi.test_tag").setTag("test_tag") - .setFacility("spi").build(); + FakeStreamObserver deleteResponseObserver = + new FakeStreamObserver(); + + manageAllocation.delete(deleteRequest, deleteResponseObserver); + + try { + allocationDao.findAllocationEntity("spi", "test_tag"); + fail("Expected exception"); + } catch (EmptyResultDataAccessException e) { + assertEquals(e.getMessage(), "Incorrect result size: expected 1, actual 0"); + } + } - AllocDeleteRequest deleteRequest = - AllocDeleteRequest.newBuilder().setAllocation(allocation).build(); + @Test + @Transactional + @Rollback(true) + public void testSetDefault() { + AllocationEntity alloc = allocationDao.getDefaultAllocationEntity(); + assertEquals(alloc.getName(), "lax.unassigned"); - FakeStreamObserver deleteResponseObserver = - new FakeStreamObserver(); + Allocation allocation = Allocation.newBuilder().setName("spi.general").setTag("general") + .setFacility("spi").build(); + AllocSetDefaultRequest request = + AllocSetDefaultRequest.newBuilder().setAllocation(allocation).build(); - manageAllocation.delete(deleteRequest, deleteResponseObserver); + FakeStreamObserver observer = + new FakeStreamObserver(); + manageAllocation.setDefault(request, observer); - try { - allocationDao.findAllocationEntity("spi", "test_tag"); - fail("Expected exception"); - } catch (EmptyResultDataAccessException e) { - assertEquals(e.getMessage(), "Incorrect result size: expected 1, actual 0"); + alloc = allocationDao.getDefaultAllocationEntity(); + assertEquals(alloc.getName(), "spi.general"); } - } - - @Test - @Transactional - @Rollback(true) - public void testSetDefault() { - AllocationEntity alloc = allocationDao.getDefaultAllocationEntity(); - assertEquals(alloc.getName(), "lax.unassigned"); - - Allocation allocation = - Allocation.newBuilder().setName("spi.general").setTag("general").setFacility("spi").build(); - AllocSetDefaultRequest request = - AllocSetDefaultRequest.newBuilder().setAllocation(allocation).build(); - - FakeStreamObserver observer = - new FakeStreamObserver(); - manageAllocation.setDefault(request, observer); - - alloc = allocationDao.getDefaultAllocationEntity(); - assertEquals(alloc.getName(), "spi.general"); - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java index d9890e894..062c49583 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageFrameTests.java @@ -41,77 +41,77 @@ @Transactional @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ManageFrameTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - FrameDao frameDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - ManageFrame manageFrame; - - public JobDetail launchJob() { - 
jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, - int red, int green, int blue) { - FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder().setState(state) - .setText(text).setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(red) - .setGreen(green).setBlue(blue).build()) - .build(); - - return override; - } - - @Test - @Transactional - @Rollback(true) - public void testFrameStateOverride() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - Frame jobFrame = Frame.newBuilder().setId(frame.getFrameId()).setName(frame.getName()) - .setState(frame.state).build(); - - // create initial override - FrameStateDisplayOverride override = - createFrameStateDisplayOverride(FrameState.SUCCEEDED, "FINISHED", 200, 200, 123); - FrameStateDisplayOverrideRequest req = FrameStateDisplayOverrideRequest.newBuilder() - .setFrame(jobFrame).setOverride(override).build(); - FakeStreamObserver responseObserver = - new FakeStreamObserver(); - manageFrame.setFrameStateDisplayOverride(req, responseObserver); - FrameStateDisplayOverrideSeq results = - frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - - // try to create same override - manageFrame.setFrameStateDisplayOverride(req, responseObserver); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - - // try to update override text - FrameStateDisplayOverride overrideUpdated = - createFrameStateDisplayOverride(FrameState.SUCCEEDED, "DONE", 200, 200, 123); - FrameStateDisplayOverrideRequest reqUpdated = FrameStateDisplayOverrideRequest.newBuilder() - .setFrame(jobFrame).setOverride(overrideUpdated).build(); - manageFrame.setFrameStateDisplayOverride(reqUpdated, responseObserver); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(1, results.getOverridesCount()); - assertEquals(overrideUpdated, results.getOverridesList().get(0)); - - // add a new override - FrameStateDisplayOverride overrideNew = - createFrameStateDisplayOverride(FrameState.EATEN, "NOMNOM", 120, 50, 123); - FrameStateDisplayOverrideRequest reqNew = FrameStateDisplayOverrideRequest.newBuilder() - .setFrame(jobFrame).setOverride(overrideNew).build(); - manageFrame.setFrameStateDisplayOverride(reqNew, responseObserver); - results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); - assertEquals(2, results.getOverridesCount()); - } + @Resource + FrameDao frameDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + ManageFrame manageFrame; + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + private FrameStateDisplayOverride createFrameStateDisplayOverride(FrameState state, String text, + int red, int green, int blue) { + FrameStateDisplayOverride override = FrameStateDisplayOverride.newBuilder().setState(state) + .setText(text).setColor(FrameStateDisplayOverride.RGB.newBuilder().setRed(red) + .setGreen(green).setBlue(blue).build()) + .build(); + + return override; + } + + @Test + @Transactional + 
@Rollback(true) + public void testFrameStateOverride() { + JobDetail job = launchJob(); + FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); + Frame jobFrame = Frame.newBuilder().setId(frame.getFrameId()).setName(frame.getName()) + .setState(frame.state).build(); + + // create initial override + FrameStateDisplayOverride override = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "FINISHED", 200, 200, 123); + FrameStateDisplayOverrideRequest req = FrameStateDisplayOverrideRequest.newBuilder() + .setFrame(jobFrame).setOverride(override).build(); + FakeStreamObserver responseObserver = + new FakeStreamObserver(); + manageFrame.setFrameStateDisplayOverride(req, responseObserver); + FrameStateDisplayOverrideSeq results = + frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + + // try to create same override + manageFrame.setFrameStateDisplayOverride(req, responseObserver); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + + // try to update override text + FrameStateDisplayOverride overrideUpdated = + createFrameStateDisplayOverride(FrameState.SUCCEEDED, "DONE", 200, 200, 123); + FrameStateDisplayOverrideRequest reqUpdated = FrameStateDisplayOverrideRequest.newBuilder() + .setFrame(jobFrame).setOverride(overrideUpdated).build(); + manageFrame.setFrameStateDisplayOverride(reqUpdated, responseObserver); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(1, results.getOverridesCount()); + assertEquals(overrideUpdated, results.getOverridesList().get(0)); + + // add a new override + FrameStateDisplayOverride overrideNew = + createFrameStateDisplayOverride(FrameState.EATEN, "NOMNOM", 120, 50, 123); + FrameStateDisplayOverrideRequest reqNew = FrameStateDisplayOverrideRequest.newBuilder() + .setFrame(jobFrame).setOverride(overrideNew).build(); + manageFrame.setFrameStateDisplayOverride(reqNew, responseObserver); + results = frameDao.getFrameStateDisplayOverrides(frame.getFrameId()); + assertEquals(2, results.getOverridesCount()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java index 454eeef81..cc69c5070 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java @@ -40,143 +40,143 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class AdminManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - AdminManager adminManager; - - @Resource - FacilityDao facilityDao; - - @Resource - ShowDao showDao; - - private static final String TEST_ALLOC_NAME = "testAlloc"; - - @Test - @Transactional - @Rollback(true) - public void createAllocation() { - AllocationEntity a = new AllocationEntity(); - a.name = TEST_ALLOC_NAME; - a.tag = "general"; - adminManager.createAllocation(facilityDao.getDefaultFacility(), a); - } - - @Test - @Transactional - @Rollback(true) - public void deleteAllocation() { - AllocationEntity a = new AllocationEntity(); - a.name = facilityDao.getDefaultFacility().getName() + "." 
+ TEST_ALLOC_NAME; - a.tag = "general"; - adminManager.createAllocation(facilityDao.getDefaultFacility(), a); - adminManager.deleteAllocation(a); - } - - @Test - @Transactional - @Rollback(true) - public void setDefaultAllocation() { - AllocationEntity a = adminManager.getDefaultAllocation(); - assertEquals(a.name, facilityDao.getDefaultFacility().getName() + ".unassigned"); - - a = new AllocationEntity(); - a.name = TEST_ALLOC_NAME; - a.tag = "general"; - adminManager.createAllocation(facilityDao.getDefaultFacility(), a); - adminManager.setDefaultAllocation(a); - - a = adminManager.getDefaultAllocation(); - assertEquals(a.name, facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void createShow() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - ShowEntity result = showDao.findShowDetail(show.name); - assertEquals(result.name, show.name); - } - - @Test - @Transactional - @Rollback(true) - public void createInvalidShow() { - ShowEntity show = new ShowEntity(); - show.name = "test/test"; - try { - adminManager.createShow(show); - fail("Expected exception"); - } catch (SpecBuilderException e) { - assertEquals(e.getMessage(), "The show name: test/test is not in the proper format. " - + "Show names must be alpha numeric, no dashes or punctuation."); + @Resource + AdminManager adminManager; + + @Resource + FacilityDao facilityDao; + + @Resource + ShowDao showDao; + + private static final String TEST_ALLOC_NAME = "testAlloc"; + + @Test + @Transactional + @Rollback(true) + public void createAllocation() { + AllocationEntity a = new AllocationEntity(); + a.name = TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + } + + @Test + @Transactional + @Rollback(true) + public void deleteAllocation() { + AllocationEntity a = new AllocationEntity(); + a.name = facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + adminManager.deleteAllocation(a); + } + + @Test + @Transactional + @Rollback(true) + public void setDefaultAllocation() { + AllocationEntity a = adminManager.getDefaultAllocation(); + assertEquals(a.name, facilityDao.getDefaultFacility().getName() + ".unassigned"); + + a = new AllocationEntity(); + a.name = TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + adminManager.setDefaultAllocation(a); + + a = adminManager.getDefaultAllocation(); + assertEquals(a.name, facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME); + } + + @Test + @Transactional + @Rollback(true) + public void createShow() { + ShowEntity show = new ShowEntity(); + show.name = "testtest"; + adminManager.createShow(show); + ShowEntity result = showDao.findShowDetail(show.name); + assertEquals(result.name, show.name); + } + + @Test + @Transactional + @Rollback(true) + public void createInvalidShow() { + ShowEntity show = new ShowEntity(); + show.name = "test/test"; + try { + adminManager.createShow(show); + fail("Expected exception"); + } catch (SpecBuilderException e) { + assertEquals(e.getMessage(), "The show name: test/test is not in the proper format. 
" + + "Show names must be alpha numeric, no dashes or punctuation."); + } + } + + @Test + @Transactional + @Rollback(true) + public void getFacility() { + adminManager.getFacility("spi"); + adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1"); + } + + @Test + @Transactional + @Rollback(true) + public void createLimit() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + } + + @Test + @Transactional + @Rollback(true) + public void deleteLimit() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + LimitInterface limit = adminManager.findLimit(limitName); + adminManager.deleteLimit(limit); + } + + @Test + @Transactional + @Rollback(true) + public void findLimit() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + adminManager.findLimit(limitName); + } + + @Test + @Transactional + @Rollback(true) + public void getLimit() { + String limitName = "testlimit"; + String limitId = adminManager.createLimit(limitName, 42); + + adminManager.getLimit(limitId); + } + + @Test + @Transactional + @Rollback(true) + public void setLimitName() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + LimitInterface limit = adminManager.findLimit(limitName); + adminManager.setLimitName(limit, "newLimitName"); + } + + @Test + @Transactional + @Rollback(true) + public void setLimitMaxValue() { + String limitName = "testlimit"; + adminManager.createLimit(limitName, 42); + LimitInterface limit = adminManager.findLimit(limitName); + adminManager.setLimitMaxValue(limit, 16); } - } - - @Test - @Transactional - @Rollback(true) - public void getFacility() { - adminManager.getFacility("spi"); - adminManager.getFacility("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1"); - } - - @Test - @Transactional - @Rollback(true) - public void createLimit() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - } - - @Test - @Transactional - @Rollback(true) - public void deleteLimit() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - LimitInterface limit = adminManager.findLimit(limitName); - adminManager.deleteLimit(limit); - } - - @Test - @Transactional - @Rollback(true) - public void findLimit() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - adminManager.findLimit(limitName); - } - - @Test - @Transactional - @Rollback(true) - public void getLimit() { - String limitName = "testlimit"; - String limitId = adminManager.createLimit(limitName, 42); - - adminManager.getLimit(limitId); - } - - @Test - @Transactional - @Rollback(true) - public void setLimitName() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - LimitInterface limit = adminManager.findLimit(limitName); - adminManager.setLimitName(limit, "newLimitName"); - } - - @Test - @Transactional - @Rollback(true) - public void setLimitMaxValue() { - String limitName = "testlimit"; - adminManager.createLimit(limitName, 42); - LimitInterface limit = adminManager.findLimit(limitName); - adminManager.setLimitMaxValue(limit, 16); - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java index 4161ea409..64843ee85 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java @@ -60,322 +60,323 @@ @ContextConfiguration(classes 
= TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class BookingManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - HostDao hostDao; - - @Resource - BookingDao bookingDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - ProcDao procDao; - - @Resource - BookingManager bookingManager; + @Resource + HostManager hostManager; - @Resource - Dispatcher localDispatcher; - - @Resource - RqdClient rqdClient; - - @Resource - Whiteboard whiteboard; - - @Before - public void setTestMode() { - localDispatcher.setTestMode(true); - rqdClient.setTestMode(true); - } - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").addTags("general").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } - - public JobDetail launchJob2() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - jobManager.setJobPaused(d, false); - return d; - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment l1 = new LocalHostAssignment(); - l1.setMaxCoreUnits(200); - l1.setMaxMemory(CueUtil.GB4); - l1.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, l1); - LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(h.getHostId(), j.getJobId()); - - assertEquals(l1.id, l2.id); - assertEquals(l1.getFrameId(), l2.getFrameId()); - assertEquals(l1.getLayerId(), l2.getLayerId()); - assertEquals(l1.getJobId(), l2.getJobId()); - assertEquals(l1.getIdleCoreUnits(), l2.getIdleCoreUnits()); - assertEquals(l1.getMaxCoreUnits(), l2.getMaxCoreUnits()); - assertEquals(l1.getThreads(), l2.getThreads()); - assertEquals(l1.getIdleMemory(), l2.getIdleMemory()); - assertEquals(l1.getMaxMemory(), l2.getMaxMemory()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - } - - @Test - @Transactional - @Rollback(true) - public void hasLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment l1 = new LocalHostAssignment(); - l1.setMaxCoreUnits(200); - l1.setMaxMemory(CueUtil.GB4); - l1.setThreads(2); - - assertFalse(bookingManager.hasLocalHostAssignment(h)); - - bookingManager.createLocalHostAssignment(h, j, l1); - assertTrue(bookingManager.hasLocalHostAssignment(h)); - - 
bookingManager.removeLocalHostAssignment(l1); - assertFalse(bookingManager.hasLocalHostAssignment(h)); - - assertFalse(bookingManager.hasActiveLocalFrames(h)); - } - - @Test - @Transactional - @Rollback(true) - public void hasActiveLocalFrames() { - // See LocalDispatcherTests - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignmentForJob() { - - DispatchHost h = createHost(); - JobDetail job = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, job, lja); - - assertNotNull(lja.getJobId()); - assertEquals(job.getJobId(), lja.getJobId()); - assertEquals(RenderPartitionType.JOB_PARTITION, lja.getType()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - whiteboard.getRenderPartition(lja); - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignmentForLayer() { - - DispatchHost h = createHost(); - JobDetail job = launchJob2(); - LayerInterface layer = jobManager.getLayers(job).get(0); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB8); - lja.setThreads(1); - - bookingManager.createLocalHostAssignment(h, layer, lja); - - assertNotNull(layer.getLayerId()); - assertEquals(layer.getLayerId(), lja.getLayerId()); - assertEquals(RenderPartitionType.LAYER_PARTITION, lja.getType()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - whiteboard.getRenderPartition(lja); - } - - @Test - @Transactional - @Rollback(true) - public void createLocalHostAssignmentForFrame() { - - DispatchHost h = createHost(); - JobDetail job = launchJob2(); - LayerInterface layer = jobManager.getLayers(job).get(0); - FrameInterface frame = jobManager.findFrame(layer, 5); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB8); - lja.setThreads(1); - - bookingManager.createLocalHostAssignment(h, frame, lja); - - assertNotNull(frame.getFrameId()); - assertEquals(frame.getFrameId(), lja.getFrameId()); - assertEquals(RenderPartitionType.FRAME_PARTITION, lja.getType()); - assertFalse(bookingManager.hasActiveLocalFrames(h)); + @Resource + AdminManager adminManager; - whiteboard.getRenderPartition(lja); - } + @Resource + JobLauncher jobLauncher; - @Test - @Transactional - @Rollback(true) - public void deactivateLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - bookingManager.deactivateLocalHostAssignment(lja); - } - - @Test - @Transactional - @Rollback(true) - public void setMaxResources() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - - /* - * Lower the cores. - */ - bookingManager.setMaxResources(lja, 100, CueUtil.GB2, 1, CueUtil.MB256); - - LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(lja.id); - - assertEquals(100, l2.getMaxCoreUnits()); - assertEquals(CueUtil.GB2, l2.getMaxMemory()); - assertEquals(CueUtil.MB256, l2.getMaxGpuMemory()); - - /* - * Raise the values. 
- */ - bookingManager.setMaxResources(lja, 200, CueUtil.GB4, 1, CueUtil.MB512); - - l2 = bookingManager.getLocalHostAssignment(lja.id); - assertEquals(200, l2.getMaxCoreUnits()); - assertEquals(CueUtil.GB4, l2.getMaxMemory()); - assertEquals(CueUtil.MB512, l2.getMaxGpuMemory()); - } - - @Test - @Transactional - @Rollback(true) - public void setIllegalMaxResources() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - assertEquals(200, h.idleCores); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpuMemory(CueUtil.MB512); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - - /* - * Raise the cores too high - */ - bookingManager.setMaxResources(lja, 800, CueUtil.GB2, 0, 0); - } - - @Test - @Transactional - @Rollback(true) - public void removeLocalHostAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - - bookingManager.createLocalHostAssignment(h, j, lja); - assertFalse(bookingManager.hasActiveLocalFrames(h)); - - /* - * Now remove the local host assignment. - */ - bookingManager.removeLocalHostAssignment(lja); - - /* - * Ensure its gone. - */ - try { - hostDao.getHost(lja); - fail("Local host is still present but should be gone"); - } catch (EmptyResultDataAccessException e) { + @Resource + JobManager jobManager; + + @Resource + HostDao hostDao; + + @Resource + BookingDao bookingDao; + + @Resource + DispatcherDao dispatcherDao; + + @Resource + ProcDao procDao; + + @Resource + BookingManager bookingManager; + + @Resource + Dispatcher localDispatcher; + + @Resource + RqdClient rqdClient; + + @Resource + Whiteboard whiteboard; + + @Before + public void setTestMode() { + localDispatcher.setTestMode(true); + rqdClient.setTestMode(true); + } + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
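The createHost() helper being reindented here builds the same RenderHost fixture that several other test classes in this patch construct inline. Purely as an illustration of how that duplication could be centralized, and not as code contained in this change, a shared factory might look like the following; every builder value is copied from the helper above, and the usual RenderHost, HardwareState and CueUtil imports from the surrounding tests are assumed.

    // Hypothetical shared fixture; values mirror the createHost() helpers in these tests.
    public final class TestRenderHosts {
        private TestRenderHosts() {}

        public static RenderHost basicHost(String name) {
            return RenderHost.newBuilder().setName(name).setBootTime(1192369572)
                    // The minimum amount of free space in the temporary directory to book a host.
                    .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1)
                    .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16)
                    .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(false).setNumProcs(2)
                    .setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi")
                    .addTags("general").setFreeGpuMem((int) CueUtil.MB512)
                    .setTotalGpuMem((int) CueUtil.MB512).build();
        }
    }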
+ .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(false).setNumProcs(2) + .setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .addTags("general").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.setJobPaused(d, false); + return d; + } + + public JobDetail launchJob2() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); + jobManager.setJobPaused(d, false); + return d; + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment l1 = new LocalHostAssignment(); + l1.setMaxCoreUnits(200); + l1.setMaxMemory(CueUtil.GB4); + l1.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, l1); + LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(h.getHostId(), j.getJobId()); + + assertEquals(l1.id, l2.id); + assertEquals(l1.getFrameId(), l2.getFrameId()); + assertEquals(l1.getLayerId(), l2.getLayerId()); + assertEquals(l1.getJobId(), l2.getJobId()); + assertEquals(l1.getIdleCoreUnits(), l2.getIdleCoreUnits()); + assertEquals(l1.getMaxCoreUnits(), l2.getMaxCoreUnits()); + assertEquals(l1.getThreads(), l2.getThreads()); + assertEquals(l1.getIdleMemory(), l2.getIdleMemory()); + assertEquals(l1.getMaxMemory(), l2.getMaxMemory()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + } + + @Test + @Transactional + @Rollback(true) + public void hasLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment l1 = new LocalHostAssignment(); + l1.setMaxCoreUnits(200); + l1.setMaxMemory(CueUtil.GB4); + l1.setThreads(2); + + assertFalse(bookingManager.hasLocalHostAssignment(h)); + + bookingManager.createLocalHostAssignment(h, j, l1); + assertTrue(bookingManager.hasLocalHostAssignment(h)); + + bookingManager.removeLocalHostAssignment(l1); + assertFalse(bookingManager.hasLocalHostAssignment(h)); + + assertFalse(bookingManager.hasActiveLocalFrames(h)); + } + + @Test + @Transactional + @Rollback(true) + public void hasActiveLocalFrames() { + // See LocalDispatcherTests + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignmentForJob() { + + DispatchHost h = createHost(); + JobDetail job = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, job, lja); + + assertNotNull(lja.getJobId()); + assertEquals(job.getJobId(), lja.getJobId()); + assertEquals(RenderPartitionType.JOB_PARTITION, lja.getType()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + whiteboard.getRenderPartition(lja); } - /* - * Ensure the cores are back on the host. 
- */ - assertEquals(200, hostDao.getDispatchHost(h.getId()).idleCores); - } + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignmentForLayer() { + + DispatchHost h = createHost(); + JobDetail job = launchJob2(); + LayerInterface layer = jobManager.getLayers(job).get(0); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB8); + lja.setThreads(1); + + bookingManager.createLocalHostAssignment(h, layer, lja); + + assertNotNull(layer.getLayerId()); + assertEquals(layer.getLayerId(), lja.getLayerId()); + assertEquals(RenderPartitionType.LAYER_PARTITION, lja.getType()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + whiteboard.getRenderPartition(lja); + } + + @Test + @Transactional + @Rollback(true) + public void createLocalHostAssignmentForFrame() { + + DispatchHost h = createHost(); + JobDetail job = launchJob2(); + LayerInterface layer = jobManager.getLayers(job).get(0); + FrameInterface frame = jobManager.findFrame(layer, 5); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB8); + lja.setThreads(1); + + bookingManager.createLocalHostAssignment(h, frame, lja); + + assertNotNull(frame.getFrameId()); + assertEquals(frame.getFrameId(), lja.getFrameId()); + assertEquals(RenderPartitionType.FRAME_PARTITION, lja.getType()); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + whiteboard.getRenderPartition(lja); + } + + @Test + @Transactional + @Rollback(true) + public void deactivateLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + bookingManager.deactivateLocalHostAssignment(lja); + } + + @Test + @Transactional + @Rollback(true) + public void setMaxResources() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + + /* + * Lower the cores. + */ + bookingManager.setMaxResources(lja, 100, CueUtil.GB2, 1, CueUtil.MB256); + + LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(lja.id); + + assertEquals(100, l2.getMaxCoreUnits()); + assertEquals(CueUtil.GB2, l2.getMaxMemory()); + assertEquals(CueUtil.MB256, l2.getMaxGpuMemory()); + + /* + * Raise the values. 
+ */ + bookingManager.setMaxResources(lja, 200, CueUtil.GB4, 1, CueUtil.MB512); + + l2 = bookingManager.getLocalHostAssignment(lja.id); + assertEquals(200, l2.getMaxCoreUnits()); + assertEquals(CueUtil.GB4, l2.getMaxMemory()); + assertEquals(CueUtil.MB512, l2.getMaxGpuMemory()); + } + + @Test + @Transactional + @Rollback(true) + public void setIllegalMaxResources() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + assertEquals(200, h.idleCores); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setMaxGpuMemory(CueUtil.MB512); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + + /* + * Raise the cores too high + */ + bookingManager.setMaxResources(lja, 800, CueUtil.GB2, 0, 0); + } + + @Test + @Transactional + @Rollback(true) + public void removeLocalHostAssignment() { + + DispatchHost h = createHost(); + JobDetail j = launchJob(); + + LocalHostAssignment lja = new LocalHostAssignment(); + lja.setMaxCoreUnits(200); + lja.setMaxMemory(CueUtil.GB4); + lja.setThreads(2); + + bookingManager.createLocalHostAssignment(h, j, lja); + assertFalse(bookingManager.hasActiveLocalFrames(h)); + + /* + * Now remove the local host assignment. + */ + bookingManager.removeLocalHostAssignment(lja); + + /* + * Ensure its gone. + */ + try { + hostDao.getHost(lja); + fail("Local host is still present but should be gone"); + } catch (EmptyResultDataAccessException e) { + } + + /* + * Ensure the cores are back on the host. + */ + assertEquals(200, hostDao.getDispatchHost(h.getId()).idleCores); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java index 46a667763..05c1d35da 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/CommentManagerTests.java @@ -36,37 +36,37 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class CommentManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - CommentManager commentManager; + @Resource + CommentManager commentManager; - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } + public JobDetail launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + jobManager.setJobPaused(d, false); + return d; + } - @Test - @Transactional - @Rollback(true) - public void testJobComment() { + @Test + @Transactional + @Rollback(true) + public void testJobComment() { - JobDetail j = launchJob(); + JobDetail j = launchJob(); - CommentDetail c = new CommentDetail(); - c.message = "A test comment"; - c.subject = "A test subject"; - c.user = "Mr. Bigglesworth"; - c.timestamp = new java.sql.Timestamp(System.currentTimeMillis()); + CommentDetail c = new CommentDetail(); + c.message = "A test comment"; + c.subject = "A test subject"; + c.user = "Mr. 
Bigglesworth"; + c.timestamp = new java.sql.Timestamp(System.currentTimeMillis()); - commentManager.addComment(j, c); + commentManager.addComment(j, c); - } + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java index 4f3b70ec0..686caf8d0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java @@ -42,55 +42,55 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class DepartmentManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - DepartmentManager departmentManager; - - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - AdminManager adminManager; - - @Resource - PointDao pointDao; - - private static final String TEST_TI_TASK_NAME = "RINT"; - - @Test - @Transactional - @Rollback(true) - public void enableTiManaged() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - PointInterface rp = pointDao.getPointConfigDetail(show, dept); - - departmentManager.disableTiManaged(rp); - departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); - } - - @Test - @Transactional - @Rollback(true) - public void updateTiManagedTasks() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - PointInterface rp; - - try { - rp = pointDao.getPointConfigDetail(show, dept); - } catch (org.springframework.dao.DataRetrievalFailureException e) { - pointDao.insertPointConf(show, dept); - rp = pointDao.getPointConfigDetail(show, dept); + @Resource + DepartmentManager departmentManager; + + @Resource + ShowDao showDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + AdminManager adminManager; + + @Resource + PointDao pointDao; + + private static final String TEST_TI_TASK_NAME = "RINT"; + + @Test + @Transactional + @Rollback(true) + public void enableTiManaged() { + ShowInterface show = showDao.findShowDetail("pipe"); + DepartmentInterface dept = departmentDao.getDefaultDepartment(); + PointInterface rp = pointDao.getPointConfigDetail(show, dept); + + departmentManager.disableTiManaged(rp); + departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); } - departmentManager.disableTiManaged(rp); - departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); - departmentManager.updateManagedTasks(rp); + @Test + @Transactional + @Rollback(true) + public void updateTiManagedTasks() { + ShowInterface show = showDao.findShowDetail("pipe"); + DepartmentInterface dept = departmentDao.getDefaultDepartment(); + PointInterface rp; + + try { + rp = pointDao.getPointConfigDetail(show, dept); + } catch (org.springframework.dao.DataRetrievalFailureException e) { + pointDao.insertPointConf(show, dept); + rp = pointDao.getPointConfigDetail(show, dept); + } + departmentManager.disableTiManaged(rp); + departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); - departmentManager.disableTiManaged(rp); + departmentManager.updateManagedTasks(rp); - } + departmentManager.disableTiManaged(rp); + + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java index 
595895b92..710c2e3b3 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerChunkingTests.java @@ -50,164 +50,164 @@ @ContextConfiguration public class DependManagerChunkingTests extends TransactionalTest { - @Resource - DependDao dependDao; - - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/chunk_depend.xml")); - } - - private JobDetail getJob() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_chunked_depend"); - } - - private int getTotalDependSum(LayerInterface layer) { - return frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() - .mapToInt(frame -> frame.dependCount).sum(); - } - - private boolean hasDependFrames(LayerInterface layer) { - FrameSearchInterface search = frameSearchFactory.create(layer); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } - - private int getDependRecordCount(LayerInterface l) { - List activeDeps = - dependDao.getWhatThisDependsOn(l, DependTarget.ANY_TARGET); - int numChildDeps = - activeDeps.stream().mapToInt(dep -> dependDao.getChildDepends(dep).size()).sum(); - return numChildDeps + activeDeps.size(); - } - - /** - * Test a non-chunked layer depending on a large chunked layer. <1> <1> <2> <3> <4> <5> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyNonChunkOnLargeChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "large_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(100, getTotalDependSum(layer_a)); - // Optimized to LayerOnLayer - assertEquals(1, getDependRecordCount(layer_a)); - - for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - dependManager.satisfyDepend(lwd); + @Resource + DependDao dependDao; + + @Resource + DependManager dependManager; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Before + public void launchTestJobs() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/chunk_depend.xml")); + } + + private JobDetail getJob() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_chunked_depend"); + } + + private int getTotalDependSum(LayerInterface layer) { + return frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() + .mapToInt(frame -> frame.dependCount).sum(); + } + + private boolean hasDependFrames(LayerInterface layer) { + FrameSearchInterface search = frameSearchFactory.create(layer); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + private int getDependRecordCount(LayerInterface l) { + List activeDeps = + dependDao.getWhatThisDependsOn(l, DependTarget.ANY_TARGET); + int 
numChildDeps = + activeDeps.stream().mapToInt(dep -> dependDao.getChildDepends(dep).size()).sum(); + return numChildDeps + activeDeps.size(); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } - - /** - * Test a large chunked layer depending on a non-chunked layer. <1> <1> <2> <3> <4> <5> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLargeChunkOnNonChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "large_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - // Optimized to LayerOnLayer - assertEquals(1, getTotalDependSum(layer_a)); - assertEquals(1, getDependRecordCount(layer_a)); - - for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - dependManager.satisfyDepend(lwd); + /** + * Test a non-chunked layer depending on a large chunked layer. <1> <1> <2> <3> <4> <5> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyNonChunkOnLargeChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "large_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(100, getTotalDependSum(layer_a)); + // Optimized to LayerOnLayer + assertEquals(1, getDependRecordCount(layer_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + assertEquals(DependType.LAYER_ON_LAYER, lwd.type); + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } - - /** - * Test a small chunk depending on a non chunk <1> <1> <2> <3> <4> <4> <5> <6> <7> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfySmallChunkOnNonChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "small_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(100, getTotalDependSum(layer_a)); - assertEquals(101, getDependRecordCount(layer_a)); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } - - /** - * Test a non chunk depending on a small chunk - * - * <1> <1> <2> <3> <4> <5> <5> <6> <7> <8> <9> <10> - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyNonChunkOnSmallChunk() { - - JobDetail job = getJob(); - LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); - LayerInterface layer_b = layerDao.findLayer(job, "small_chunk"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - assertEquals(101, getDependRecordCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(100, getTotalDependSum(layer_a)); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - 
assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependSum(layer_a)); - } + /** + * Test a large chunked layer depending on a non-chunked layer. <1> <1> <2> <3> <4> <5> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLargeChunkOnNonChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "large_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + // Optimized to LayerOnLayer + assertEquals(1, getTotalDependSum(layer_a)); + assertEquals(1, getDependRecordCount(layer_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + assertEquals(DependType.LAYER_ON_LAYER, lwd.type); + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } + + /** + * Test a small chunk depending on a non chunk <1> <1> <2> <3> <4> <4> <5> <6> <7> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfySmallChunkOnNonChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "small_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "no_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(100, getTotalDependSum(layer_a)); + assertEquals(101, getDependRecordCount(layer_a)); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } + + /** + * Test a non chunk depending on a small chunk + * + * <1> <1> <2> <3> <4> <5> <5> <6> <7> <8> <9> <10> + */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyNonChunkOnSmallChunk() { + + JobDetail job = getJob(); + LayerInterface layer_a = layerDao.findLayer(job, "no_chunk"); + LayerInterface layer_b = layerDao.findLayer(job, "small_chunk"); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + assertEquals(101, getDependRecordCount(layer_a)); + assertTrue(hasDependFrames(layer_a)); + assertEquals(100, getTotalDependSum(layer_a)); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependSum(layer_a)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java index 1b7fd5c54..5d4c25907 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DependManagerTests.java @@ -61,500 +61,500 @@ @ContextConfiguration public class DependManagerTests extends TransactionalTest { - @Resource - DependDao dependDao; - - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); - } - - 
public JobDetail getJobA() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); - } - - public JobDetail getJobB() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); - } - - private int getTotalDependCount(JobInterface job) { - return frameDao.findFrameDetails(frameSearchFactory.create(job)).stream() - .mapToInt(frame -> frame.dependCount).sum(); - } - - private boolean hasDependFrames(JobInterface job) { - FrameSearchInterface search = frameSearchFactory.create(job); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } - - private int getTotalDependCount(LayerInterface layer) { - return frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() - .mapToInt(frame -> frame.dependCount).sum(); - } - - private boolean hasDependFrames(LayerInterface layer) { - FrameSearchInterface search = frameSearchFactory.create(layer); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } - - private int getTotalDependCount(FrameInterface frame) { - return frameDao.findFrameDetails(frameSearchFactory.create(frame)).stream() - .mapToInt(frameDetail -> frameDetail.dependCount).sum(); - } - - private boolean hasDependFrames(FrameInterface frame) { - FrameSearchInterface search = frameSearchFactory.create(frame); - search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); - return frameDao.findFrames(search).size() > 0; - } - - @Test - @Transactional - @Rollback(true) - public void testUnsatisfyFrameOnFrame() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependManager.createDepend(depend); - - // Check to ensure depend was setup properly. - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - - LightweightDependency lwd = dependManager.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - - // Check to ensure it was satisfied properly. - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - - // Now unsatisfy it. 
- dependManager.unsatisfyDepend(lwd); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyJobOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(20, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); - - for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { - dependManager.satisfyDepend(lwd); + @Resource + DependDao dependDao; + + @Resource + DependManager dependManager; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Before + public void launchTestJobs() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); + } + + public JobDetail getJobA() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); + } + + public JobDetail getJobB() { + return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); + } + + private int getTotalDependCount(JobInterface job) { + return frameDao.findFrameDetails(frameSearchFactory.create(job)).stream() + .mapToInt(frame -> frame.dependCount).sum(); + } + + private boolean hasDependFrames(JobInterface job) { + FrameSearchInterface search = frameSearchFactory.create(job); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + private int getTotalDependCount(LayerInterface layer) { + return frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() + .mapToInt(frame -> frame.dependCount).sum(); + } + + private boolean hasDependFrames(LayerInterface layer) { + FrameSearchInterface search = frameSearchFactory.create(layer); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; } - assertFalse(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(0, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); - } + private int getTotalDependCount(FrameInterface frame) { + return frameDao.findFrameDetails(frameSearchFactory.create(frame)).stream() + .mapToInt(frameDetail -> frameDetail.dependCount).sum(); + } + + private boolean hasDependFrames(FrameInterface frame) { + FrameSearchInterface search = frameSearchFactory.create(frame); + search.filterByFrameStates(ImmutableList.of(FrameState.DEPEND)); + return frameDao.findFrames(search).size() > 0; + } + + @Test + @Transactional + @Rollback(true) + public void testUnsatisfyFrameOnFrame() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependManager.createDepend(depend); + + // Check to ensure depend was setup properly. 
+ assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + + LightweightDependency lwd = dependManager.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + + // Check to ensure it was satisfied properly. + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); + + // Now unsatisfy it. + dependManager.unsatisfyDepend(lwd); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyJobOnLayer() { + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyJobOnJob() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); - JobOnLayer depend = new JobOnLayer(job_a, layer_b); - dependManager.createDepend(depend); + JobOnJob depend = new JobOnJob(job_a, job_b); + dependManager.createDepend(depend); - assertTrue(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(20, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + assertTrue(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(20, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); - for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { - dependManager.satisfyDepend(lwd); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(0, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); } - assertFalse(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(0, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyJobOnLayer() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyJobOnFrame() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + JobOnLayer depend = new JobOnLayer(job_a, layer_b); + dependManager.createDepend(depend); - JobOnFrame depend = new JobOnFrame(job_a, frame_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(20, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); - assertTrue(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(20, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + dependManager.satisfyDepend(lwd); + } - for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); 
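Each test in this class repeats the same pair of checks, hasDependFrames plus getTotalDependCount, before and after the depend is satisfied. The helper below folds that pair into a single assertion; it is a sketch only, wraps the private methods already defined in this test class, and does not exist in this patch.

    // Hypothetical assertion helper; relies on the hasDependFrames/getTotalDependCount
    // methods defined above. Overloads for JobInterface and FrameInterface would be identical.
    private void assertDependingFrames(LayerInterface layer, int expectedDependCount) {
        assertEquals(expectedDependCount, getTotalDependCount(layer));
        // In these tests a non-zero depend count always coincides with frames in DEPEND state.
        assertEquals(expectedDependCount > 0, hasDependFrames(layer));
    }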
+ assertEquals(0, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); } - assertFalse(hasDependFrames(job_a)); - assertFalse(hasDependFrames(job_b)); - assertEquals(0, getTotalDependCount(job_a)); - assertEquals(0, getTotalDependCount(job_b)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyJobOnFrame() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnJob() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + JobOnFrame depend = new JobOnFrame(job_a, frame_b); + dependManager.createDepend(depend); - LayerOnJob depend = new LayerOnJob(layer_a, job_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(20, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + } - for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { - dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(job_a)); + assertFalse(hasDependFrames(job_b)); + assertEquals(0, getTotalDependCount(job_a)); + assertEquals(0, getTotalDependCount(job_b)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnJob() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnLayer() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + LayerOnJob depend = new LayerOnJob(layer_a, job_b); + dependManager.createDepend(depend); - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { + dependManager.satisfyDepend(lwd); + } - for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { - dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnLayer() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnFrame() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - 
FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + dependManager.createDepend(depend); - LayerOnFrame depend = new LayerOnFrame(layer_a, frame_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + dependManager.satisfyDepend(lwd); + } - for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnFrame() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnSimFrame() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + LayerOnFrame depend = new LayerOnFrame(layer_a, frame_b); + dependManager.createDepend(depend); - LayerOnSimFrame depend = new LayerOnSimFrame(layer_a, frame_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + } - for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnSimFrame() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameOnJob() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + LayerOnSimFrame depend = new LayerOnSimFrame(layer_a, frame_b); + dependManager.createDepend(depend); - FrameOnJob depend = new FrameOnJob(frame_a, job_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + } - for (LightweightDependency 
lwd : dependDao.getWhatDependsOn(job_b)) { - dependManager.satisfyDepend(lwd); + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - - FrameOnLayer depend = new FrameOnLayer(frame_a, layer_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - - for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { - dependManager.satisfyDepend(lwd); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameOnJob() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + + FrameOnJob depend = new FrameOnJob(frame_a, job_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(job_b)) { + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameInterface frame_a = frameDao.findFrame(layer_a, 1); - FrameInterface frame_b = frameDao.findFrame(layer_b, 1); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependManager.createDepend(depend); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(1, getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(frame_a)); - assertEquals(1, getTotalDependCount(frame_a)); - - for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameOnLayer() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + + FrameOnLayer depend = new FrameOnLayer(frame_a, layer_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + 
+ for (LightweightDependency lwd : dependDao.getWhatDependsOn(layer_b)) { + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - assertFalse(hasDependFrames(frame_a)); - assertEquals(0, getTotalDependCount(frame_a)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameOnFrame() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + FrameInterface frame_a = frameDao.findFrame(layer_a, 1); + FrameInterface frame_b = frameDao.findFrame(layer_b, 1); + + FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); + dependManager.createDepend(depend); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(1, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(frame_a)); + assertEquals(1, getTotalDependCount(frame_a)); + + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + assertFalse(hasDependFrames(frame_a)); + assertEquals(0, getTotalDependCount(frame_a)); + } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameByFrame() { + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameByFrame() { - /** - * A compound depend, like FrameByFrame or PreviousFrame cannot be satisfied by using - * dependDao.getWhatDependsOn. You must have a reference to the actual dependency. - */ + /** + * A compound depend, like FrameByFrame or PreviousFrame cannot be satisfied by using + * dependDao.getWhatDependsOn. You must have a reference to the actual dependency. 
+ */ - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyLayerOnLayerAnyFrame() { + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyLayerOnLayerAnyFrame() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - depend.setAnyFrame(true); - dependManager.createDepend(depend); + LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); + depend.setAnyFrame(true); + dependManager.createDepend(depend); - assertTrue(hasDependFrames(layer_a)); - assertEquals(10, getTotalDependCount(layer_a)); + assertTrue(hasDependFrames(layer_a)); + assertEquals(10, getTotalDependCount(layer_a)); - FrameInterface frame_b = frameDao.findFrame(layer_b, 5); + FrameInterface frame_b = frameDao.findFrame(layer_b, 5); - for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + } + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); } - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyPreviousFrame() { - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyPreviousFrame() { + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + PreviousFrame depend = new PreviousFrame(layer_a, layer_b); + dependManager.createDepend(depend); - PreviousFrame depend = new PreviousFrame(layer_a, layer_b); - dependManager.createDepend(depend); + assertTrue(hasDependFrames(layer_a)); + assertEquals(9, 
getTotalDependCount(layer_a)); - assertTrue(hasDependFrames(layer_a)); - assertEquals(9, getTotalDependCount(layer_a)); + FrameInterface frame_b = frameDao.findFrame(layer_b, 9); + for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { + dependManager.satisfyDepend(lwd); + for (FrameDetail f : frameDao.findFrameDetails(frameSearchFactory.create(layer_a))) { + logger.info(f.getName() + " " + f.state.toString()); + } + } - FrameInterface frame_b = frameDao.findFrame(layer_b, 9); - for (LightweightDependency lwd : dependDao.getWhatDependsOn(frame_b)) { - dependManager.satisfyDepend(lwd); - for (FrameDetail f : frameDao.findFrameDetails(frameSearchFactory.create(layer_a))) { - logger.info(f.getName() + " " + f.state.toString()); - } + assertTrue(hasDependFrames(layer_a)); + assertEquals(8, getTotalDependCount(layer_a)); } - assertTrue(hasDependFrames(layer_a)); - assertEquals(8, getTotalDependCount(layer_a)); - } - - /** - * In this test, some of the dependOnFrames are already completed. The FrameOnFrame depends that - * get setup on those frames should be inactive, and the depend count should not be updated the - * corresponding dependErFrames. - */ - @Test - @Transactional - @Rollback(true) - public void testCreateAndSatisfyFrameByFrameParital() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - FrameSearchInterface search = frameSearchFactory.create(layer_b); - search.filterByFrameSet("1-3"); - frameDao.findFrames(search) - .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependManager.createDepend(depend); - - /** Check the active state **/ - assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 1)).stream() - .noneMatch(dep -> dep.active)); - assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 2)).stream() - .noneMatch(dep -> dep.active)); - assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 3)).stream() - .noneMatch(dep -> dep.active)); - assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 4)).stream() - .allMatch(dep -> dep.active)); - - assertTrue(hasDependFrames(layer_a)); - assertEquals(7, getTotalDependCount(layer_a)); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - dependManager.satisfyDepend(lwd); - - assertFalse(hasDependFrames(layer_a)); - assertEquals(0, getTotalDependCount(layer_a)); - } + /** + * In this test, some of the dependOnFrames are already completed. The FrameOnFrame depends that + * get setup on those frames should be inactive, and the depend count should not be updated the + * corresponding dependErFrames. 
+ */ + @Test + @Transactional + @Rollback(true) + public void testCreateAndSatisfyFrameByFrameParital() { + + JobDetail job_a = getJobA(); + JobDetail job_b = getJobB(); + LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); + LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); + + FrameSearchInterface search = frameSearchFactory.create(layer_b); + search.filterByFrameSet("1-3"); + frameDao.findFrames(search) + .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); + + FrameByFrame depend = new FrameByFrame(layer_a, layer_b); + dependManager.createDepend(depend); + + /** Check the active state **/ + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 1)).stream() + .noneMatch(dep -> dep.active)); + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 2)).stream() + .noneMatch(dep -> dep.active)); + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 3)).stream() + .noneMatch(dep -> dep.active)); + assertTrue(dependDao.getWhatDependsOn(frameDao.findFrame(layer_b, 4)).stream() + .allMatch(dep -> dep.active)); + + assertTrue(hasDependFrames(layer_a)); + assertEquals(7, getTotalDependCount(layer_a)); + + LightweightDependency lwd = dependDao.getDepend(depend.getId()); + dependManager.satisfyDepend(lwd); + + assertFalse(hasDependFrames(layer_a)); + assertEquals(0, getTotalDependCount(layer_a)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java index 17df2e023..f0eedab9e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/EmailSupportTests.java @@ -44,75 +44,75 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class EmailSupportTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - EmailSupport emailSupport; + @Resource + EmailSupport emailSupport; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - @Resource - DependDao dependDao; + @Resource + DependDao dependDao; - @Resource - LayerDao layerDao; + @Resource + LayerDao layerDao; - @Resource - DependManager dependManager; + @Resource + DependManager dependManager; - @Resource - FrameSearchFactory frameSearchFactory; + @Resource + FrameSearchFactory frameSearchFactory; - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } - @Test - @Transactional - @Rollback(true) - public void testJobCompleteEmailSuccess() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); + @Test + @Transactional + @Rollback(true) + public void testJobCompleteEmailSuccess() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); - JobDetail job = spec.getJobs().get(0).detail; + JobDetail job = spec.getJobs().get(0).detail; - jobDao.updateEmail(job, System.getProperty("user.name")); + jobDao.updateEmail(job, System.getProperty("user.name")); - // Satisfy all dependencies, this will allow us to mark frames as complete. 
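The pattern behind that comment, condensed as a sketch; it assumes the same injected layerDao, dependDao, dependManager, frameDao, frameSearchFactory and emailSupport fields and a launched JobDetail job, as in the two email tests:

// Release everything each layer of the job still depends on...
layerDao.getLayers(job)
    .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET)
        .forEach(dep -> dependManager.satisfyDepend(dep)));

// ...then force every frame into a terminal state; the success test uses
// FrameState.SUCCEEDED, the failure test uses FrameState.DEAD.
frameDao.findFrames(frameSearchFactory.create(job)).forEach(frame -> frameDao
    .updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.SUCCEEDED));

emailSupport.sendShutdownEmail(job);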
- layerDao.getLayers(job) - .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) - .forEach(dep -> dependManager.satisfyDepend(dep))); + // Satisfy all dependencies, this will allow us to mark frames as complete. + layerDao.getLayers(job) + .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) + .forEach(dep -> dependManager.satisfyDepend(dep))); - frameDao.findFrames(frameSearchFactory.create(job)).forEach(frame -> frameDao - .updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.SUCCEEDED)); + frameDao.findFrames(frameSearchFactory.create(job)).forEach(frame -> frameDao + .updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.SUCCEEDED)); - emailSupport.sendShutdownEmail(job); - } + emailSupport.sendShutdownEmail(job); + } - @Test - @Transactional - @Rollback(true) - public void testJobCompleteEmailFail() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); + @Test + @Transactional + @Rollback(true) + public void testJobCompleteEmailFail() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); - JobDetail job = spec.getJobs().get(0).detail; + JobDetail job = spec.getJobs().get(0).detail; - jobDao.updateEmail(job, System.getProperty("user.name")); + jobDao.updateEmail(job, System.getProperty("user.name")); - layerDao.getLayers(job) - .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) - .forEach(dep -> dependManager.satisfyDepend(dep))); + layerDao.getLayers(job) + .forEach(layer -> dependDao.getWhatThisDependsOn(layer, DependTarget.ANY_TARGET) + .forEach(dep -> dependManager.satisfyDepend(dep))); - frameDao.findFrames(frameSearchFactory.create(job)).forEach( - frame -> frameDao.updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.DEAD)); + frameDao.findFrames(frameSearchFactory.create(job)).forEach(frame -> frameDao + .updateFrameState(frameDao.getFrame(frame.getFrameId()), FrameState.DEAD)); - emailSupport.sendShutdownEmail(job); - } + emailSupport.sendShutdownEmail(job); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java index 5443e9171..c1e111dd7 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java @@ -63,332 +63,332 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class FilterManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - FilterDao filterDao; + @Resource + FilterDao filterDao; - @Resource - ShowDao showDao; + @Resource + ShowDao showDao; - @Resource - DepartmentDao departmentDao; + @Resource + DepartmentDao departmentDao; - @Resource - GroupManager groupManager; - - @Resource - JobManager jobManager; + @Resource + GroupManager groupManager; + + @Resource + JobManager jobManager; - @Resource - FilterManager filterManager; + @Resource + FilterManager filterManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - LayerDao layerDao; + @Resource + LayerDao layerDao; - @Resource - GroupDao groupDao; - - @Resource - Whiteboard whiteboard; - - private static String FILTER_NAME = "test_filter"; 
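A minimal sketch of the FilterManager flow these tests cover, assuming the injected filterDao, filterManager, jobLauncher and jobManager fields declared in this class and the jobspec.xml fixture used above:

// A MATCH_ANY filter on the test show, as buildFilter() constructs it.
FilterEntity f = new FilterEntity();
f.name = "test_filter";
f.showId = "00000000-0000-0000-0000-000000000000";
f.type = FilterType.MATCH_ANY;
f.enabled = true;
filterDao.insertFilter(f);

// A matcher that fires when the shot name ends with ".cue".
MatcherEntity m = new MatcherEntity();
m.filterId = f.getFilterId();
m.subject = MatchSubject.SHOT;
m.type = MatchType.ENDS_WITH;
m.value = ".cue";

jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml"));
JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1");
assertTrue(filterManager.isMatch(m, job));

// Actions are applied the same way; this one pauses the matched job.
ActionEntity a = new ActionEntity();
a.type = ActionType.PAUSE_JOB;
a.filterId = f.getFilterId();
a.valueType = ActionValueType.BOOLEAN_TYPE;
a.booleanValue = true;
filterManager.applyAction(a, job);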
- - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public FilterEntity buildFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testShotEndsWith() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - MatcherEntity m = new MatcherEntity(); - m.filterId = f.getFilterId(); - m.name = "match end of shot"; - m.subject = MatchSubject.SHOT; - m.type = MatchType.ENDS_WITH; - m.value = ".cue"; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - assertTrue(filterManager.isMatch(m, job)); - m.value = "layout"; - assertFalse(filterManager.isMatch(m, job)); - } - - @Test - @Transactional - @Rollback(true) - public void testLayerNameContains() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - MatcherEntity m = new MatcherEntity(); - m.filterId = f.getFilterId(); - m.name = "layer name contains"; - m.subject = MatchSubject.LAYER_NAME; - m.type = MatchType.CONTAINS; - m.value = "pass_1"; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - assertTrue(filterManager.isMatch(m, job)); - m.value = "pass_11111"; - assertFalse(filterManager.isMatch(m, job)); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionPauseJob() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.BOOLEAN_TYPE; - a1.booleanValue = true; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertTrue(jobDao.getJobDetail(job.getJobId()).isPaused); - - a1.booleanValue = false; - filterManager.applyAction(a1, job); - assertFalse(jobDao.getJobDetail(job.getJobId()).isPaused); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetMemoryOptimizer() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_MEMORY_OPTIMIZER; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.BOOLEAN_TYPE; - a1.booleanValue = false; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertTrue(whiteboard.getLayers(job).getLayersList().stream() - .noneMatch(Layer::getMemoryOptimizerEnabled)); - - a1.booleanValue = true; - filterManager.applyAction(a1, job); - assertTrue(whiteboard.getLayers(job).getLayersList().stream() - .allMatch(Layer::getMemoryOptimizerEnabled)); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetMinCores() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_JOB_MIN_CORES; - a1.filterId = f.getFilterId(); - a1.valueType = 
ActionValueType.FLOAT_TYPE; - a1.floatValue = 10f; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals(Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).minCoreUnits, 0); - - a1.floatValue = 100f; - filterManager.applyAction(a1, job); - assertEquals(Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).minCoreUnits, 0); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetMaxCores() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_JOB_MAX_CORES; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.FLOAT_TYPE; - a1.floatValue = 10f; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals(Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).maxCoreUnits, 0); - - a1.intValue = 100; - filterManager.applyAction(a1, job); - assertEquals(Convert.coresToCoreUnits(a1.floatValue), - jobDao.getJobDetail(job.getJobId()).maxCoreUnits, 0); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetPriority() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_JOB_PRIORITY; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.INTEGER_TYPE; - a1.intValue = 100; - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals(a1.intValue, jobDao.getJobDetail(job.getJobId()).priority); - - a1.intValue = 1001; - filterManager.applyAction(a1, job); - assertEquals(a1.intValue, jobDao.getJobDetail(job.getJobId()).priority); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionMoveToGroup() { - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - GroupDetail g = new GroupDetail(); - g.name = "Testest"; - g.showId = job.getShowId(); - g.deptId = departmentDao.getDefaultDepartment().getId(); - - groupManager.createGroup(g, groupManager.getRootGroupDetail(job)); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.MOVE_JOB_TO_GROUP; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.GROUP_TYPE; - a1.groupValue = g.id; - - filterManager.applyAction(a1, job); - - assertEquals(g.id, jobDao.getJobDetail(job.getJobId()).groupId); - - assertEquals(groupDao.getGroupDetail(a1.groupValue).deptId, - jobDao.getJobDetail(job.getJobId()).deptId); - } - - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetRenderCoreLayers() { + @Resource + GroupDao groupDao; + + @Resource + Whiteboard whiteboard; + + private static String FILTER_NAME = "test_filter"; + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + + public ShowInterface getShow() { + return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); + } + + public FilterEntity buildFilter() { + FilterEntity filter = new 
FilterEntity(); + filter.name = FILTER_NAME; + filter.showId = "00000000-0000-0000-0000-000000000000"; + filter.type = FilterType.MATCH_ANY; + filter.enabled = true; + + return filter; + } + + @Test + @Transactional + @Rollback(true) + public void testShotEndsWith() { + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + MatcherEntity m = new MatcherEntity(); + m.filterId = f.getFilterId(); + m.name = "match end of shot"; + m.subject = MatchSubject.SHOT; + m.type = MatchType.ENDS_WITH; + m.value = ".cue"; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + assertTrue(filterManager.isMatch(m, job)); + m.value = "layout"; + assertFalse(filterManager.isMatch(m, job)); + } + + @Test + @Transactional + @Rollback(true) + public void testLayerNameContains() { + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + MatcherEntity m = new MatcherEntity(); + m.filterId = f.getFilterId(); + m.name = "layer name contains"; + m.subject = MatchSubject.LAYER_NAME; + m.type = MatchType.CONTAINS; + m.value = "pass_1"; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + assertTrue(filterManager.isMatch(m, job)); + m.value = "pass_11111"; + assertFalse(filterManager.isMatch(m, job)); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionPauseJob() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.PAUSE_JOB; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.BOOLEAN_TYPE; + a1.booleanValue = true; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertTrue(jobDao.getJobDetail(job.getJobId()).isPaused); + + a1.booleanValue = false; + filterManager.applyAction(a1, job); + assertFalse(jobDao.getJobDetail(job.getJobId()).isPaused); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetMemoryOptimizer() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_MEMORY_OPTIMIZER; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.BOOLEAN_TYPE; + a1.booleanValue = false; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertTrue(whiteboard.getLayers(job).getLayersList().stream() + .noneMatch(Layer::getMemoryOptimizerEnabled)); + + a1.booleanValue = true; + filterManager.applyAction(a1, job); + assertTrue(whiteboard.getLayers(job).getLayersList().stream() + .allMatch(Layer::getMemoryOptimizerEnabled)); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetMinCores() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_JOB_MIN_CORES; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.FLOAT_TYPE; + a1.floatValue = 10f; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + 
assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).minCoreUnits, 0); + + a1.floatValue = 100f; + filterManager.applyAction(a1, job); + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).minCoreUnits, 0); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetMaxCores() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_JOB_MAX_CORES; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.FLOAT_TYPE; + a1.floatValue = 10f; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).maxCoreUnits, 0); + + a1.intValue = 100; + filterManager.applyAction(a1, job); + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + jobDao.getJobDetail(job.getJobId()).maxCoreUnits, 0); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetPriority() { + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_JOB_PRIORITY; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.INTEGER_TYPE; + a1.intValue = 100; + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(a1.intValue, jobDao.getJobDetail(job.getJobId()).priority); + + a1.intValue = 1001; + filterManager.applyAction(a1, job); + assertEquals(a1.intValue, jobDao.getJobDetail(job.getJobId()).priority); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionMoveToGroup() { + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + GroupDetail g = new GroupDetail(); + g.name = "Testest"; + g.showId = job.getShowId(); + g.deptId = departmentDao.getDefaultDepartment().getId(); + + groupManager.createGroup(g, groupManager.getRootGroupDetail(job)); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.MOVE_JOB_TO_GROUP; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.GROUP_TYPE; + a1.groupValue = g.id; + + filterManager.applyAction(a1, job); + + assertEquals(g.id, jobDao.getJobDetail(job.getJobId()).groupId); + + assertEquals(groupDao.getGroupDetail(a1.groupValue).deptId, + jobDao.getJobDetail(job.getJobId()).deptId); + } + + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetRenderCoreLayers() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_MIN_CORES; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.FLOAT_TYPE; - a1.floatValue = 40f; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); + + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_ALL_RENDER_LAYER_MIN_CORES; + a1.filterId = f.getFilterId(); + a1.valueType = 
ActionValueType.FLOAT_TYPE; + a1.floatValue = 40f; - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); - - assertEquals(Convert.coresToCoreUnits(a1.floatValue), - layerDao.findLayerDetail(job, "pass_1").minimumCores, 0); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); + + assertEquals(Convert.coresToCoreUnits(a1.floatValue), + layerDao.findLayerDetail(job, "pass_1").minimumCores, 0); - assertEquals(Convert.coresToCoreUnits(.25f), - layerDao.findLayerDetail(job, "pass_1_preprocess").minimumCores, 0); - } + assertEquals(Convert.coresToCoreUnits(.25f), + layerDao.findLayerDetail(job, "pass_1_preprocess").minimumCores, 0); + } - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetRenderLayerMemory() { + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetRenderLayerMemory() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_MEMORY; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.INTEGER_TYPE; - a1.intValue = CueUtil.GB8; + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_ALL_RENDER_LAYER_MEMORY; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.INTEGER_TYPE; + a1.intValue = CueUtil.GB8; - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); - assertEquals(CueUtil.GB8, layerDao.findLayerDetail(job, "pass_1").minimumMemory); - } + assertEquals(CueUtil.GB8, layerDao.findLayerDetail(job, "pass_1").minimumMemory); + } - @Test - @Transactional - @Rollback(true) - public void testApplyActionSetAllRenderLayerTags() { + @Test + @Transactional + @Rollback(true) + public void testApplyActionSetAllRenderLayerTags() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); + FilterEntity f = buildFilter(); + filterDao.insertFilter(f); - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_TAGS; - a1.filterId = f.getFilterId(); - a1.valueType = ActionValueType.STRING_TYPE; - a1.stringValue = "blah"; + ActionEntity a1 = new ActionEntity(); + a1.type = ActionType.SET_ALL_RENDER_LAYER_TAGS; + a1.filterId = f.getFilterId(); + a1.valueType = ActionValueType.STRING_TYPE; + a1.stringValue = "blah"; - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - filterManager.applyAction(a1, job); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + filterManager.applyAction(a1, job); - assertThat(layerDao.findLayerDetail(job, "pass_1").tags, contains("blah")); - assertThat(layerDao.findLayerDetail(job, "pass_1_preprocess").tags, contains("general")); - } + assertThat(layerDao.findLayerDetail(job, "pass_1").tags, contains("blah")); + assertThat(layerDao.findLayerDetail(job, "pass_1_preprocess").tags, contains("general")); + } } diff --git 
a/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java index d99e0cc98..d47d5e4b0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/GroupManagerTests.java @@ -46,67 +46,67 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class GroupManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - GroupManager groupManager; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - GroupDao groupDao; - - @Resource - JobDao jobDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - ShowDao showDao; - - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } - - @Test - @Transactional - @Rollback(true) - public void createGroup() { - ShowInterface pipe = showDao.findShowDetail("pipe"); - GroupDetail group = new GroupDetail(); - group.name = "testGroup"; - group.showId = pipe.getId(); - group.parentId = groupDao.getRootGroupDetail(pipe).getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupManager.createGroup(group, null); - } - - @Test - @Transactional - @Rollback(true) - public void setGroupDepartment() { - ShowInterface pipe = showDao.findShowDetail("pipe"); - GroupDetail group = groupDao.getRootGroupDetail(pipe); - - // Launch a test job - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - JobInterface job = jobManager.getJob(spec.getJobs().get(0).detail.id); - - // Set the group's department property to Lighting, it should - // currently be Unknown - DepartmentInterface dept = departmentDao.findDepartment("Lighting"); - jobDao.updateParent(job, group); - - // Update the group to the Lighting department - groupManager.setGroupDepartment(group, dept); - - // Now check if the job we launched was also updated to the lighting department - assertEquals(dept.getDepartmentId(), jobDao.getJobDetail(job.getJobId()).deptId); - } + @Resource + GroupManager groupManager; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + GroupDao groupDao; + + @Resource + JobDao jobDao; + + @Resource + DepartmentDao departmentDao; + + @Resource + ShowDao showDao; + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + + @Test + @Transactional + @Rollback(true) + public void createGroup() { + ShowInterface pipe = showDao.findShowDetail("pipe"); + GroupDetail group = new GroupDetail(); + group.name = "testGroup"; + group.showId = pipe.getId(); + group.parentId = groupDao.getRootGroupDetail(pipe).getId(); + group.deptId = departmentDao.getDefaultDepartment().getId(); + groupManager.createGroup(group, null); + } + + @Test + @Transactional + @Rollback(true) + public void setGroupDepartment() { + ShowInterface pipe = showDao.findShowDetail("pipe"); + GroupDetail group = groupDao.getRootGroupDetail(pipe); + + // Launch a test job + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + JobInterface job = jobManager.getJob(spec.getJobs().get(0).detail.id); + + // Set the group's department property to Lighting, it should + // currently be Unknown + DepartmentInterface dept = departmentDao.findDepartment("Lighting"); + jobDao.updateParent(job, group); + + // 
Update the group to the Lighting department + groupManager.setGroupDepartment(group, dept); + + // Now check if the job we launched was also updated to the lighting department + assertEquals(dept.getDepartmentId(), jobDao.getJobDetail(job.getJobId()).deptId); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java index f6673b106..305e66a35 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java @@ -60,127 +60,128 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class HostManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - HostDao hostDao; + @Resource + HostDao hostDao; - @Resource - FacilityDao facilityDao; + @Resource + FacilityDao facilityDao; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - @Resource - ProcDao procDao; + @Resource + ProcDao procDao; - @Resource - AllocationDao allocationDao; + @Resource + AllocationDao allocationDao; - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - OwnerManager ownerManager; + @Resource + OwnerManager ownerManager; - private static final String HOST_NAME = "alpha1"; + private static final String HOST_NAME = "alpha1"; - public DispatchHost createHost() { + public DispatchHost createHost() { - RenderHost host = RenderHost.newBuilder().setName(HOST_NAME).setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap(2076) - .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(400).setState(HardwareState.UP) - .setFacility("spi").addAllTags(ImmutableList.of("linux", "64bit")) - .setFreeGpuMem((int) CueUtil.MB512).setTotalGpuMem((int) CueUtil.MB512).build(); + RenderHost host = RenderHost.newBuilder().setName(HOST_NAME).setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(15290520).setFreeSwap(2076).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap(2076) + .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(400) + .setState(HardwareState.UP).setFacility("spi") + .addAllTags(ImmutableList.of("linux", "64bit")).setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); - hostDao.insertRenderHost(host, adminManager.findAllocationDetail("spi", "general"), false); + hostDao.insertRenderHost(host, adminManager.findAllocationDetail("spi", "general"), false); - return hostDao.findDispatchHost(HOST_NAME); - } + return hostDao.findDispatchHost(HOST_NAME); + } - @Before - public void setTestMode() { - jobLauncher.testMode = true; - } + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } - /** - * Test that moves a host from one allocation to another. 
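Both allocation tests reduce to the sketch below, assuming the createHost() fixture above plus the injected hostManager, allocationDao, frameDao, jobManager, jobLauncher and procDao fields of this class:

DispatchHost h = createHost();

// With nothing booked on the host, moving it between allocations just works.
hostManager.setAllocation(h, allocationDao.findAllocationEntity("spi", "general"));

// Once a proc for a show is running on the host, moving it to an allocation that
// show has no subscription for is rejected with EntityModificationError.
jobLauncher.launch(new File("src/test/resources/conf/jobspec/facility.xml"));
JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1");
DispatchFrame frame = frameDao.getDispatchFrame(frameDao.findFrameDetail(job, "0001-pass_1").id);
VirtualProc proc = VirtualProc.build(h, frame);
proc.frameId = frame.id;
procDao.insertVirtualProc(proc);
hostManager.setAllocation(h, allocationDao.findAllocationEntity("spi", "desktop")); // throws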
- */ - @Test - @Transactional - @Rollback(true) - public void setAllocation() { - HostInterface h = createHost(); - hostManager.setAllocation(h, allocationDao.findAllocationEntity("spi", "general")); - } + /** + * Test that moves a host from one allocation to another. + */ + @Test + @Transactional + @Rollback(true) + public void setAllocation() { + HostInterface h = createHost(); + hostManager.setAllocation(h, allocationDao.findAllocationEntity("spi", "general")); + } - /** - * This test ensures you can't transfer a host that has a proc assigned to a show without a - * subscription to the destination allocation. - */ - @Test(expected = EntityModificationError.class) - @Transactional - @Rollback(true) - public void setBadAllocation() { + /** + * This test ensures you can't transfer a host that has a proc assigned to a show without a + * subscription to the destination allocation. + */ + @Test(expected = EntityModificationError.class) + @Transactional + @Rollback(true) + public void setBadAllocation() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/facility.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/facility.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); + DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - DispatchHost h = createHost(); + DispatchHost h = createHost(); - AllocationEntity ad = allocationDao.findAllocationEntity("spi", "desktop"); + AllocationEntity ad = allocationDao.findAllocationEntity("spi", "desktop"); - VirtualProc proc = VirtualProc.build(h, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); + VirtualProc proc = VirtualProc.build(h, frame); + proc.frameId = frame.id; + procDao.insertVirtualProc(proc); - AllocationEntity ad2 = allocationDao.findAllocationEntity("spi", "desktop"); - hostManager.setAllocation(h, ad2); - } + AllocationEntity ad2 = allocationDao.findAllocationEntity("spi", "desktop"); + hostManager.setAllocation(h, ad2); + } - @Test - @Transactional - @Rollback(true) - public void testGetPrefferedShow() { - DispatchHost h = createHost(); + @Test + @Transactional + @Rollback(true) + public void testGetPrefferedShow() { + DispatchHost h = createHost(); - ShowInterface pshow = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("spongebob", pshow); + ShowInterface pshow = adminManager.findShowEntity("pipe"); + OwnerEntity o = ownerManager.createOwner("spongebob", pshow); - ownerManager.takeOwnership(o, h); + ownerManager.takeOwnership(o, h); - ShowInterface show = hostManager.getPreferredShow(h); - assertEquals(pshow, show); - } + ShowInterface show = hostManager.getPreferredShow(h); + assertEquals(pshow, show); + } - @Test - @Transactional - @Rollback(true) - public void testisPrefferedShow() { - DispatchHost h = createHost(); + @Test + @Transactional + @Rollback(true) + public void testisPrefferedShow() { + DispatchHost h = createHost(); - assertFalse(hostManager.isPreferShow(h)); + assertFalse(hostManager.isPreferShow(h)); - ShowInterface pshow = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("spongebob", pshow); + ShowInterface pshow = adminManager.findShowEntity("pipe"); + OwnerEntity o = 
ownerManager.createOwner("spongebob", pshow); - ownerManager.takeOwnership(o, h); + ownerManager.takeOwnership(o, h); - ShowInterface show = hostManager.getPreferredShow(h); - assertEquals(pshow, show); + ShowInterface show = hostManager.getPreferredShow(h); + assertEquals(pshow, show); - assertTrue(hostManager.isPreferShow(h)); - } + assertTrue(hostManager.isPreferShow(h)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java index e6a9601dd..eae351b02 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java @@ -73,428 +73,430 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobManager jobManager; + @Resource + JobManager jobManager; - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - @Resource - JobManagerSupport jobManagerSupport; + @Resource + JobManagerSupport jobManagerSupport; - @Resource - HostManager hostManager; + @Resource + HostManager hostManager; - @Resource - AdminManager adminManager; + @Resource + AdminManager adminManager; - @Resource - LayerDao layerDao; + @Resource + LayerDao layerDao; - @Resource - DispatcherDao dispatcherDao; + @Resource + DispatcherDao dispatcherDao; - @Resource - FrameDao frameDao; + @Resource + FrameDao frameDao; - @Resource - JobDao jobDao; + @Resource + JobDao jobDao; - @Resource - FrameSearchFactory frameSearchFactory; + @Resource + FrameSearchFactory frameSearchFactory; - private static final String JOB1 = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; - private static final String JOB2 = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; - private static final String JOB3 = "pipe-dev.cue-testuser_shell_v1"; + private static final String JOB1 = "pipe-dev.cue-testuser_shell_dispatch_test_v1"; + private static final String JOB2 = "pipe-dev.cue-testuser_shell_dispatch_test_v2"; + private static final String JOB3 = "pipe-dev.cue-testuser_shell_v1"; - public JobDetail getJob1() { - return jobManager.findJobDetail(JOB1); - } + public JobDetail getJob1() { + return jobManager.findJobDetail(JOB1); + } + + public JobDetail getJob2() { + return jobManager.findJobDetail(JOB2); + } + + public JobDetail getJob3() { + return jobManager.findJobDetail(JOB3); + } + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(false).setNumProcs(2) + .setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .addTags("general").build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + @BeforeTransaction + public void init() { + jobLauncher.testMode = true; + + for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { + try { + JobInterface job = jobDao.findJob(jobName); + jobDao.updateJobFinished(job); + jobDao.deleteJob(job); + } catch (EmptyResultDataAccessException e) { + // Job doesn't exist, ignore. 
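The init()/destroy() lifecycle condensed into a sketch, assuming the JOB1/JOB2/JOB3 constants and the injected jobDao and jobLauncher fields of this class:

for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) {
    try {
        // Remove any copy left over from a previous run so the launch below starts clean.
        JobInterface job = jobDao.findJob(jobName);
        jobDao.updateJobFinished(job);
        jobDao.deleteJob(job);
    } catch (EmptyResultDataAccessException e) {
        // Job doesn't exist yet, nothing to clean up.
    }
}

// Relaunch both fixture specs, then pause the jobs so nothing dispatches mid-test.
jobLauncher.launch(jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")));
jobLauncher.launch(jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")));
for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) {
    jobDao.updatePaused(jobDao.findJob(jobName), true);
}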
+ } + } + + JobSpec spec = jobLauncher + .parse(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + jobLauncher.launch(spec); + + spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); + jobLauncher.launch(spec); + + for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { + jobDao.updatePaused(jobDao.findJob(jobName), true); + } + } + + @AfterTransaction + public void destroy() { + for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { + JobInterface job = jobDao.findJob(jobName); + jobDao.updateJobFinished(job); + jobDao.deleteJob(job); + } + } + + @Test + @Transactional + @Rollback(true) + public void testLaunchAutoEatJob() { + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/autoeat.xml")); + jobLauncher.launch(spec); + + assertTrue(jobDao.getDispatchJob(spec.getJobs().get(0).detail.id).autoEat); + } + + @Test + @Transactional + @Rollback(true) + public void testLaunchJob() { + LayerDetail job1Layer = layerDao.findLayerDetail(jobDao.findJob(JOB1), "pass_1"); + assertEquals(CueUtil.GB2, job1Layer.minimumMemory); + assertEquals(100, job1Layer.minimumCores); + + // check some job_stats values + assertEquals(20, getJob1().totalFrames); + assertEquals(10, jobDao.getFrameStateTotals(jobDao.findJob(JOB2)).waiting); + assertEquals(0, jobDao.getFrameStateTotals(jobDao.findJob(JOB1)).depend); + + FrameStateTotals job3FrameStates = jobDao.getFrameStateTotals(jobDao.findJob(JOB3)); + assertEquals(1, job3FrameStates.waiting); + assertEquals(10, job3FrameStates.depend); + } + + @Test + @Transactional + @Rollback(true) + public void testShutdownRelaunchJob() { + JobDetail job1 = getJob1(); + JobDetail job2 = getJob2(); + logger.info("job detail: " + job2.getName()); + logger.info("job state " + job2.state.toString()); + + jobManager.shutdownJob(job1); + jobManager.shutdownJob(job2); + + assertEquals(JobState.FINISHED, jobDao.getJobDetail(job1.id).state); + + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); + + getJob1(); + } + + @Test + @Transactional + @Rollback(true) + public void testShutdownJob() { + JobDetail job = getJob1(); + logger.info("job detail: " + job.getName()); + logger.info("job state " + job.state.toString()); + + jobManager.shutdownJob(getJob1()); + + assertEquals(JobState.FINISHED, jobDao.getJobDetail(job.id).state); + } + + @Test + @Transactional + @Rollback(true) + public void testAutoNameJob() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_autoname.xml")); + jobLauncher.launch(spec); - public JobDetail getJob2() { - return jobManager.findJobDetail(JOB2); - } + assertEquals(JobState.PENDING, jobDao.findJobDetail(spec.conformJobName("autoname")).state); + } - public JobDetail getJob3() { - return jobManager.findJobDetail(JOB3); - } + @Test + @Transactional + @Rollback(true) + public void testShowAlias() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/show_alias.xml")); + jobLauncher.launch(spec); + } - public DispatchHost createHost() { + @Test + @Transactional + @Rollback(true) + public void testMisNamedJob() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); + assertEquals("pipe-dev.cue-testuser_pipe_dev.cue_testuser_blah_blah_v1", + spec.getJobs().get(0).detail.name); + } - RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to 
book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").addTags("general").build(); + @Test + @Transactional + @Rollback(true) + public void testMisNamedJob2() { + JobSpec spec = + jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + assertEquals(spec.conformJobName("blah_____blah_v1"), "pipe-dev.cue-testuser_blah_blah_v1"); + } - return dh; - } + @Test + @Transactional + @Rollback(true) + public void testNonExistentShow() { + JobSpec spec = jobLauncher + .parse(new File("src/test/resources/conf/jobspec/jobspec_nonexistent_show.xml")); + try { + jobLauncher.launch(spec); + fail("Expected exception"); + } catch (EntityCreationError e) { + assertEquals(e.getMessage(), + "The nonexistentshow does not exist. Please contact administrator of your " + + "OpenCue deployment to have this show created."); + } + } - @BeforeTransaction - public void init() { - jobLauncher.testMode = true; + @Test + @Transactional + @Rollback(true) + public void testPostFrameJobLaunch() { + JobSpec spec = jobLauncher + .parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); + jobLauncher.launch(spec); + + String jobId = spec.getJobs().get(0).detail.id; + String postJobId = spec.getJobs().get(0).getPostJob().detail.id; + + assertEquals(JobState.PENDING, jobDao.getJobDetail(jobId).state); + assertTrue(jobManager.shutdownJob(jobManager.getJob(jobId))); + assertEquals(JobState.FINISHED, jobDao.getJobDetail(jobId).state); + assertEquals(JobState.PENDING, jobDao.getJobDetail(postJobId).state); + } - for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { - try { - JobInterface job = jobDao.findJob(jobName); - jobDao.updateJobFinished(job); - jobDao.deleteJob(job); - } catch (EmptyResultDataAccessException e) { - // Job doesn't exist, ignore. 
- } + @Test + @Transactional + @Rollback(true) + public void testReorderLayerFirst() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_2"); + + jobManager.reorderLayer(layer, new FrameSet("5-10"), Order.FIRST); + + assertEquals(-6, frameDao.findFrameDetail(job, "0005-pass_2").dispatchOrder); + assertEquals(-5, frameDao.findFrameDetail(job, "0006-pass_2").dispatchOrder); + assertEquals(-4, frameDao.findFrameDetail(job, "0007-pass_2").dispatchOrder); + assertEquals(-3, frameDao.findFrameDetail(job, "0008-pass_2").dispatchOrder); + assertEquals(-2, frameDao.findFrameDetail(job, "0009-pass_2").dispatchOrder); + assertEquals(-1, frameDao.findFrameDetail(job, "0010-pass_2").dispatchOrder); + assertEquals(3, frameDao.findFrameDetail(job, "0004-pass_2").dispatchOrder); + assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_2").dispatchOrder); + assertEquals(1, frameDao.findFrameDetail(job, "0002-pass_2").dispatchOrder); + assertEquals(0, frameDao.findFrameDetail(job, "0001-pass_2").dispatchOrder); + + DispatchHost host = createHost(); + jobManager.setJobPaused(job, false); + + String[] order = new String[] {"0005-pass_2", "0006-pass_2", "0007-pass_2", "0008-pass_2", + "0009-pass_2", "0010-pass_2", "0001-pass_1", "0001-pass_2"}; + + for (String f : order) { + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + assertEquals(f, frame.getName()); + } } - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - jobLauncher.launch(spec); + @Test + @Transactional + @Rollback(true) + public void testReorderLayerLast() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + + jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.LAST); + + assertEquals(11, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); + assertEquals(12, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); + assertEquals(13, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); + assertEquals(14, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); + assertEquals(15, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); + + DispatchHost host = createHost(); + jobManager.setJobPaused(job, false); + + String[] order = new String[] {"0001-pass_2", "0002-pass_2", "0003-pass_2", "0004-pass_2", + "0005-pass_2", "0006-pass_1", "0006-pass_2", "0007-pass_1"}; + + for (String f : order) { + DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); + frameDao.updateFrameState(frame, FrameState.SUCCEEDED); + assertEquals(f, frame.getName()); + } + } + + @Test + @Transactional + @Rollback(true) + public void testReorderLayerReverse() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); - spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); + jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.REVERSE); - for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { - jobDao.updatePaused(jobDao.findJob(jobName), true); + assertEquals(0, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); + assertEquals(1, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); + assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); + assertEquals(3, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); + assertEquals(4, frameDao.findFrameDetail(job, 
"0001-pass_1").dispatchOrder); } - } - - @AfterTransaction - public void destroy() { - for (String jobName : ImmutableList.of(JOB1, JOB2, JOB3)) { - JobInterface job = jobDao.findJob(jobName); - jobDao.updateJobFinished(job); - jobDao.deleteJob(job); + + @Test + @Transactional + @Rollback(true) + public void testStaggerLayer() { + + JobDetail job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + FrameSet staggeredFrameSet = new FrameSet("1-10:2"); + jobManager.staggerLayer(layer, "1-10", 2); + + for (int i = 0; i < staggeredFrameSet.size(); i++) { + assertEquals(staggeredFrameSet.get(i), frameDao.findFrameDetail(job, + CueUtil.buildFrameName(layer, staggeredFrameSet.get(i))).number); + } + } - } - - @Test - @Transactional - @Rollback(true) - public void testLaunchAutoEatJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/autoeat.xml")); - jobLauncher.launch(spec); - - assertTrue(jobDao.getDispatchJob(spec.getJobs().get(0).detail.id).autoEat); - } - - @Test - @Transactional - @Rollback(true) - public void testLaunchJob() { - LayerDetail job1Layer = layerDao.findLayerDetail(jobDao.findJob(JOB1), "pass_1"); - assertEquals(CueUtil.GB2, job1Layer.minimumMemory); - assertEquals(100, job1Layer.minimumCores); - - // check some job_stats values - assertEquals(20, getJob1().totalFrames); - assertEquals(10, jobDao.getFrameStateTotals(jobDao.findJob(JOB2)).waiting); - assertEquals(0, jobDao.getFrameStateTotals(jobDao.findJob(JOB1)).depend); - - FrameStateTotals job3FrameStates = jobDao.getFrameStateTotals(jobDao.findJob(JOB3)); - assertEquals(1, job3FrameStates.waiting); - assertEquals(10, job3FrameStates.depend); - } - - @Test - @Transactional - @Rollback(true) - public void testShutdownRelaunchJob() { - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - logger.info("job detail: " + job2.getName()); - logger.info("job state " + job2.state.toString()); - - jobManager.shutdownJob(job1); - jobManager.shutdownJob(job2); - - assertEquals(JobState.FINISHED, jobDao.getJobDetail(job1.id).state); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - - getJob1(); - } - - @Test - @Transactional - @Rollback(true) - public void testShutdownJob() { - JobDetail job = getJob1(); - logger.info("job detail: " + job.getName()); - logger.info("job state " + job.state.toString()); - - jobManager.shutdownJob(getJob1()); - - assertEquals(JobState.FINISHED, jobDao.getJobDetail(job.id).state); - } - - @Test - @Transactional - @Rollback(true) - public void testAutoNameJob() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_autoname.xml")); - jobLauncher.launch(spec); - - assertEquals(JobState.PENDING, jobDao.findJobDetail(spec.conformJobName("autoname")).state); - } - - @Test - @Transactional - @Rollback(true) - public void testShowAlias() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/show_alias.xml")); - jobLauncher.launch(spec); - } - - @Test - @Transactional - @Rollback(true) - public void testMisNamedJob() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); - assertEquals("pipe-dev.cue-testuser_pipe_dev.cue_testuser_blah_blah_v1", - spec.getJobs().get(0).detail.name); - } - - @Test - @Transactional - @Rollback(true) - public void testMisNamedJob2() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_misnamed.xml")); - - 
assertEquals(spec.conformJobName("blah_____blah_v1"), "pipe-dev.cue-testuser_blah_blah_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testNonExistentShow() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_nonexistent_show.xml")); - try { - jobLauncher.launch(spec); - fail("Expected exception"); - } catch (EntityCreationError e) { - assertEquals(e.getMessage(), - "The nonexistentshow does not exist. Please contact administrator of your " - + "OpenCue deployment to have this show created."); + + @Test + @Transactional + @Rollback(true) + public void testGetLayers() { + JobDetail job = getJob1(); + jobManager.getLayerDetails(job); + jobManager.getLayers(job); } - } - - @Test - @Transactional - @Rollback(true) - public void testPostFrameJobLaunch() { - JobSpec spec = - jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - String jobId = spec.getJobs().get(0).detail.id; - String postJobId = spec.getJobs().get(0).getPostJob().detail.id; - - assertEquals(JobState.PENDING, jobDao.getJobDetail(jobId).state); - assertTrue(jobManager.shutdownJob(jobManager.getJob(jobId))); - assertEquals(JobState.FINISHED, jobDao.getJobDetail(jobId).state); - assertEquals(JobState.PENDING, jobDao.getJobDetail(postJobId).state); - } - - @Test - @Transactional - @Rollback(true) - public void testReorderLayerFirst() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_2"); - - jobManager.reorderLayer(layer, new FrameSet("5-10"), Order.FIRST); - - assertEquals(-6, frameDao.findFrameDetail(job, "0005-pass_2").dispatchOrder); - assertEquals(-5, frameDao.findFrameDetail(job, "0006-pass_2").dispatchOrder); - assertEquals(-4, frameDao.findFrameDetail(job, "0007-pass_2").dispatchOrder); - assertEquals(-3, frameDao.findFrameDetail(job, "0008-pass_2").dispatchOrder); - assertEquals(-2, frameDao.findFrameDetail(job, "0009-pass_2").dispatchOrder); - assertEquals(-1, frameDao.findFrameDetail(job, "0010-pass_2").dispatchOrder); - assertEquals(3, frameDao.findFrameDetail(job, "0004-pass_2").dispatchOrder); - assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_2").dispatchOrder); - assertEquals(1, frameDao.findFrameDetail(job, "0002-pass_2").dispatchOrder); - assertEquals(0, frameDao.findFrameDetail(job, "0001-pass_2").dispatchOrder); - - DispatchHost host = createHost(); - jobManager.setJobPaused(job, false); - - String[] order = new String[] {"0005-pass_2", "0006-pass_2", "0007-pass_2", "0008-pass_2", - "0009-pass_2", "0010-pass_2", "0001-pass_1", "0001-pass_2"}; - - for (String f : order) { - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertEquals(f, frame.getName()); + + @Test + @Transactional + @Rollback(true) + public void eatLayer() { + JobInterface job = getJob1(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + FrameSearchInterface r = frameSearchFactory.create(layer); + FrameSearchCriteria criteria = r.getCriteria(); + r.setCriteria(criteria.toBuilder().setPage(1).setLimit(5).build()); + jobManagerSupport.eatFrames(r, new Source()); + + assertTrue(frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() + .allMatch(frame -> frame.state == FrameState.EATEN)); } - } - @Test - @Transactional - @Rollback(true) - public void testReorderLayerLast() { + @Test + @Transactional + @Rollback(true) + public void optimizeLayer() { + JobInterface job = getJob3(); 
+ LayerDetail layer = layerDao.findLayerDetail(job, "pass_1"); - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); + // Hardcoded value of dispatcher.memory.mem_reserved_default + // to avoid having to read opencue.properties on a test setting + long memReservedDefault = 3355443; - jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.LAST); + assertEquals(memReservedDefault, layer.minimumMemory); + assertThat(layer.tags, contains("general")); - assertEquals(11, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); - assertEquals(12, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); - assertEquals(13, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); - assertEquals(14, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); - assertEquals(15, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); + /* + * Make sure the layer is optimizable. + */ + frameDao.findFrames(frameSearchFactory.create(layer)).stream().limit(5) + .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); + layerDao.updateUsage(layer, new ResourceUsage(100, 3500 * 5, 0), 0); - DispatchHost host = createHost(); - jobManager.setJobPaused(job, false); + // Test to make sure our optimization + jobManager.optimizeLayer(layer, 100, CueUtil.MB512, 120); + + assertEquals(CueUtil.MB512 + CueUtil.MB256, + layerDao.findLayerDetail(job, "pass_1").minimumMemory); + } - String[] order = new String[] {"0001-pass_2", "0002-pass_2", "0003-pass_2", "0004-pass_2", - "0005-pass_2", "0006-pass_1", "0006-pass_2", "0007-pass_1"}; + @Test + @Transactional + @Rollback(true) + public void testIsLayerThreadable() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); - for (String f : order) { - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertEquals(f, frame.getName()); + assertFalse(jobManager.isLayerThreadable(layer)); } - } - - @Test - @Transactional - @Rollback(true) - public void testReorderLayerReverse() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - jobManager.reorderLayer(layer, new FrameSet("1-5"), Order.REVERSE); - - assertEquals(0, frameDao.findFrameDetail(job, "0005-pass_1").dispatchOrder); - assertEquals(1, frameDao.findFrameDetail(job, "0004-pass_1").dispatchOrder); - assertEquals(2, frameDao.findFrameDetail(job, "0003-pass_1").dispatchOrder); - assertEquals(3, frameDao.findFrameDetail(job, "0002-pass_1").dispatchOrder); - assertEquals(4, frameDao.findFrameDetail(job, "0001-pass_1").dispatchOrder); - } - - @Test - @Transactional - @Rollback(true) - public void testStaggerLayer() { - - JobDetail job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - FrameSet staggeredFrameSet = new FrameSet("1-10:2"); - jobManager.staggerLayer(layer, "1-10", 2); - - for (int i = 0; i < staggeredFrameSet.size(); i++) { - assertEquals(staggeredFrameSet.get(i), frameDao.findFrameDetail(job, - CueUtil.buildFrameName(layer, staggeredFrameSet.get(i))).number); + + @Test + @Transactional + @Rollback(true) + public void testGetLayer() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + assertEquals(layer, jobManager.getLayer(layer.getId())); + } + + @Test + @Transactional + @Rollback(true) + public void testFindFrame() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + + 
FrameInterface frame = jobManager.findFrame(layer, 1); + assertEquals("0001-pass_1", frame.getName()); } - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayers() { - JobDetail job = getJob1(); - jobManager.getLayerDetails(job); - jobManager.getLayers(job); - } - - @Test - @Transactional - @Rollback(true) - public void eatLayer() { - JobInterface job = getJob1(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - FrameSearchInterface r = frameSearchFactory.create(layer); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().setPage(1).setLimit(5).build()); - jobManagerSupport.eatFrames(r, new Source()); - - assertTrue(frameDao.findFrameDetails(frameSearchFactory.create(layer)).stream() - .allMatch(frame -> frame.state == FrameState.EATEN)); - } - - @Test - @Transactional - @Rollback(true) - public void optimizeLayer() { - JobInterface job = getJob3(); - LayerDetail layer = layerDao.findLayerDetail(job, "pass_1"); - - // Hardcoded value of dispatcher.memory.mem_reserved_default - // to avoid having to read opencue.properties on a test setting - long memReservedDefault = 3355443; - - assertEquals(memReservedDefault, layer.minimumMemory); - assertThat(layer.tags, contains("general")); - - /* - * Make sure the layer is optimizable. - */ - frameDao.findFrames(frameSearchFactory.create(layer)).stream().limit(5) - .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); - layerDao.updateUsage(layer, new ResourceUsage(100, 3500 * 5, 0), 0); - - // Test to make sure our optimization - jobManager.optimizeLayer(layer, 100, CueUtil.MB512, 120); - - assertEquals(CueUtil.MB512 + CueUtil.MB256, - layerDao.findLayerDetail(job, "pass_1").minimumMemory); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerThreadable() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - assertFalse(jobManager.isLayerThreadable(layer)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayer() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - assertEquals(layer, jobManager.getLayer(layer.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - - FrameInterface frame = jobManager.findFrame(layer, 1); - assertEquals("0001-pass_1", frame.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testAddLayerLimit() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - jobManager.addLayerLimit(layer, "0001-limit-1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerLimits() { - JobInterface job = getJob3(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - jobManager.getLayerLimits(layer); - } + @Test + @Transactional + @Rollback(true) + public void testAddLayerLimit() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + jobManager.addLayerLimit(layer, "0001-limit-1"); + } + + @Test + @Transactional + @Rollback(true) + public void testGetLayerLimits() { + JobInterface job = getJob3(); + LayerInterface layer = layerDao.findLayer(job, "pass_1"); + jobManager.getLayerLimits(layer); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java index 
4e7863ae2..69057bb83 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java @@ -40,83 +40,84 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobSpecTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - JobLauncher jobLauncher; + @Resource + JobLauncher jobLauncher; - private static String readJobSpec(String name) { - String path = "src/test/resources/conf/jobspec/" + name; - byte[] encoded = null; + private static String readJobSpec(String name) { + String path = "src/test/resources/conf/jobspec/" + name; + byte[] encoded = null; - try { - encoded = Files.readAllBytes(Paths.get(path)); - } catch (IOException e) { - fail("readJobSpec should succeed to read jobspec file"); + try { + encoded = Files.readAllBytes(Paths.get(path)); + } catch (IOException e) { + fail("readJobSpec should succeed to read jobspec file"); + } + + return new String(encoded, StandardCharsets.UTF_8); + } + + @Test + public void testParseSuccess() { + String xml = readJobSpec("jobspec_1_10.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.10.dtd"); + assertEquals(spec.getJobs().size(), 1); + assertEquals(spec.getJobs().get(0).detail.name, "testing-default-testuser_test"); + } + + @Test + public void testParseNonExistent() { + String xml = readJobSpec("jobspec_nonexistent_dtd.xml"); + try { + jobLauncher.parse(xml); + fail("Expected exception"); + } catch (SpecBuilderException e) { + assertTrue(e.getMessage() + .startsWith("Failed to parse job spec XML, java.net.MalformedURLException")); + } + } + + @Test + public void testParseInvalidShot() { + String xml = readJobSpec("jobspec_invalid_shot.xml"); + try { + jobLauncher.parse(xml); + fail("Expected exception"); + } catch (SpecBuilderException e) { + assertEquals(e.getMessage(), + "The shot name: invalid/shot is not in the proper format. 
" + + "Shot names must be alpha numeric, no dashes or punctuation."); + } } - return new String(encoded, StandardCharsets.UTF_8); - } - - @Test - public void testParseSuccess() { - String xml = readJobSpec("jobspec_1_10.xml"); - JobSpec spec = jobLauncher.parse(xml); - assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); - assertEquals(spec.getDoc().getDocType().getSystemID(), - "http://localhost:8080/spcue/dtd/cjsl-1.10.dtd"); - assertEquals(spec.getJobs().size(), 1); - assertEquals(spec.getJobs().get(0).detail.name, "testing-default-testuser_test"); - } - - @Test - public void testParseNonExistent() { - String xml = readJobSpec("jobspec_nonexistent_dtd.xml"); - try { - jobLauncher.parse(xml); - fail("Expected exception"); - } catch (SpecBuilderException e) { - assertTrue(e.getMessage() - .startsWith("Failed to parse job spec XML, java.net.MalformedURLException")); + @Test + public void testParseGpuSuccess() { + String xml = readJobSpec("jobspec_1_12.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); + assertEquals(spec.getJobs().size(), 1); + BuildableJob job = spec.getJobs().get(0); + assertEquals(job.detail.name, "testing-default-testuser_test"); + LayerDetail layer = job.getBuildableLayers().get(0).layerDetail; + assertEquals(layer.getMinimumGpus(), 1); + assertEquals(layer.getMinimumGpuMemory(), 1048576); } - } - - @Test - public void testParseInvalidShot() { - String xml = readJobSpec("jobspec_invalid_shot.xml"); - try { - jobLauncher.parse(xml); - fail("Expected exception"); - } catch (SpecBuilderException e) { - assertEquals(e.getMessage(), "The shot name: invalid/shot is not in the proper format. 
" - + "Shot names must be alpha numeric, no dashes or punctuation."); + + @Test + public void testParseMaxCoresAndMaxGpus() { + String xml = readJobSpec("jobspec_1_13.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.13.dtd"); + assertEquals(spec.getJobs().size(), 1); + BuildableJob job = spec.getJobs().get(0); + assertEquals(job.maxCoresOverride, Integer.valueOf(420)); + assertEquals(job.maxGpusOverride, Integer.valueOf(42)); } - } - - @Test - public void testParseGpuSuccess() { - String xml = readJobSpec("jobspec_1_12.xml"); - JobSpec spec = jobLauncher.parse(xml); - assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); - assertEquals(spec.getDoc().getDocType().getSystemID(), - "http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); - assertEquals(spec.getJobs().size(), 1); - BuildableJob job = spec.getJobs().get(0); - assertEquals(job.detail.name, "testing-default-testuser_test"); - LayerDetail layer = job.getBuildableLayers().get(0).layerDetail; - assertEquals(layer.getMinimumGpus(), 1); - assertEquals(layer.getMinimumGpuMemory(), 1048576); - } - - @Test - public void testParseMaxCoresAndMaxGpus() { - String xml = readJobSpec("jobspec_1_13.xml"); - JobSpec spec = jobLauncher.parse(xml); - assertEquals(spec.getDoc().getDocType().getPublicID(), "SPI Cue Specification Language"); - assertEquals(spec.getDoc().getDocType().getSystemID(), - "http://localhost:8080/spcue/dtd/cjsl-1.13.dtd"); - assertEquals(spec.getJobs().size(), 1); - BuildableJob job = spec.getJobs().get(0); - assertEquals(job.maxCoresOverride, Integer.valueOf(420)); - assertEquals(job.maxGpusOverride, Integer.valueOf(42)); - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java index ff4188cfb..2f9456409 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/MaintenanceManagerSupportTests.java @@ -30,16 +30,16 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class MaintenanceManagerSupportTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - MaintenanceManagerSupport maintenanceManager; - - @Test - public void testCheckHardwareState() { - maintenanceManager.checkHardwareState(); - } - - @Test - public void archiveFinishedJobs() { - maintenanceManager.archiveFinishedJobs(); - } + @Resource + MaintenanceManagerSupport maintenanceManager; + + @Test + public void testCheckHardwareState() { + maintenanceManager.checkHardwareState(); + } + + @Test + public void archiveFinishedJobs() { + maintenanceManager.archiveFinishedJobs(); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java index 8b712a6a6..e589cc4da 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java @@ -46,119 +46,120 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class OwnerManagerTests extends 
AbstractTransactionalJUnit4SpringContextTests { - @Resource - OwnerManager ownerManager; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Resource - DeedDao deedDao; - - @Resource - Whiteboard whiteboard; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) - // The minimum amount of free space in the temporary directory to book a host. - .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) - .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16).setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(true).setNumProcs(2).setCoresPerProc(100).setState(HardwareState.UP) - .setFacility("spi").addTags("general").setFreeGpuMem((int) CueUtil.MB512) - .setTotalGpuMem((int) CueUtil.MB512).build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testCreateOwner() { - ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - OwnerEntity owner = ownerManager.findOwner("spongebob"); - assertEquals(owner.name, "spongebob"); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteOwner() { - ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - assertTrue(ownerManager.deleteOwner(ownerManager.findOwner("spongebob"))); - } - - @Test - @Transactional - @Rollback(true) - public void testGetOwner() { - OwnerEntity o1 = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - OwnerEntity o2 = ownerManager.getOwner(o1.id); - assertEquals(o1, o2); - } - - @Test - @Transactional - @Rollback(true) - public void testFindOwner() { - OwnerEntity o1 = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - OwnerEntity o2 = ownerManager.findOwner(o1.name); - assertEquals(o1, o2); - } - - @Test - @Transactional - @Rollback(true) - public void testSetShow() { - OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - ShowEntity newShow = adminManager.findShowEntity("edu"); - ownerManager.setShow(o, newShow); - - assertEquals(newShow.name, whiteboard.getOwner(o.name).getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testTakeOwnership() { - OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - ownerManager.takeOwnership(o, host); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDeed() { - OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - DeedEntity d = ownerManager.takeOwnership(o, host); - - assertEquals(d, ownerManager.getDeed(d.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testRemoveDeed() { - OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - DeedEntity d = ownerManager.takeOwnership(o, host); - - ownerManager.removeDeed(d); - } + @Resource + OwnerManager ownerManager; + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + DeedDao deedDao; + + @Resource + Whiteboard whiteboard; + + public DispatchHost createHost() { + + RenderHost host = RenderHost.newBuilder().setName("test_host").setBootTime(1192369572) + // The minimum 
amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB).setFreeMem(53500).setFreeSwap(20760).setLoad(1) + .setTotalMcp(CueUtil.GB4).setTotalMem((int) CueUtil.GB16) + .setTotalSwap((int) CueUtil.GB16).setNimbyEnabled(true).setNumProcs(2) + .setCoresPerProc(100).setState(HardwareState.UP).setFacility("spi") + .addTags("general").setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512).build(); + + DispatchHost dh = hostManager.createHost(host); + hostManager.setAllocation(dh, adminManager.findAllocationDetail("spi", "general")); + + return dh; + } + + @Test + @Transactional + @Rollback(true) + public void testCreateOwner() { + ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + OwnerEntity owner = ownerManager.findOwner("spongebob"); + assertEquals(owner.name, "spongebob"); + } + + @Test + @Transactional + @Rollback(true) + public void testDeleteOwner() { + ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + assertTrue(ownerManager.deleteOwner(ownerManager.findOwner("spongebob"))); + } + + @Test + @Transactional + @Rollback(true) + public void testGetOwner() { + OwnerEntity o1 = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + OwnerEntity o2 = ownerManager.getOwner(o1.id); + assertEquals(o1, o2); + } + + @Test + @Transactional + @Rollback(true) + public void testFindOwner() { + OwnerEntity o1 = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + OwnerEntity o2 = ownerManager.findOwner(o1.name); + assertEquals(o1, o2); + } + + @Test + @Transactional + @Rollback(true) + public void testSetShow() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + ShowEntity newShow = adminManager.findShowEntity("edu"); + ownerManager.setShow(o, newShow); + + assertEquals(newShow.name, whiteboard.getOwner(o.name).getShow()); + } + + @Test + @Transactional + @Rollback(true) + public void testTakeOwnership() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + DispatchHost host = createHost(); + ownerManager.takeOwnership(o, host); + } + + @Test + @Transactional + @Rollback(true) + public void testGetDeed() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + DispatchHost host = createHost(); + DeedEntity d = ownerManager.takeOwnership(o, host); + + assertEquals(d, ownerManager.getDeed(d.id)); + } + + @Test + @Transactional + @Rollback(true) + public void testRemoveDeed() { + OwnerEntity o = ownerManager.createOwner("spongebob", adminManager.findShowEntity("pipe")); + + DispatchHost host = createHost(); + DeedEntity d = ownerManager.takeOwnership(o, host); + + ownerManager.removeDeed(d); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java index 0217dd8e8..d9b67e7aa 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java @@ -47,132 +47,132 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class ServiceManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - ServiceManager serviceManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - LayerDao layerDao; - - @Before - 
public void setTestMode() { - jobLauncher.testMode = true; - } - - @Test - @Transactional - @Rollback(true) - public void testGetDefaultService() { - ServiceEntity srv1 = serviceManager.getService("default"); - ServiceEntity srv2 = serviceManager.getDefaultService(); - - assertEquals(srv1, srv2); - } - - @Test - @Transactional - @Rollback(true) - public void testCreateService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpuMemory = CueUtil.GB2; - s.threadable = false; - s.timeout = 0; - s.timeout_llu = 0; - s.tags.addAll(Sets.newHashSet("general")); - serviceManager.createService(s); - - ServiceEntity newService = serviceManager.getService(s.id); - assertEquals(s, newService); - } - - @Test - @Transactional - @Rollback(true) - public void testOverrideExistingService() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "arnold"; - s.minCores = 400; - s.timeout = 10; - s.timeout_llu = 10; - s.minMemory = CueUtil.GB8; - s.minGpuMemory = CueUtil.GB2; - s.threadable = false; - s.tags.addAll(Sets.newHashSet("general")); - s.showId = "00000000-0000-0000-0000-000000000000"; - serviceManager.createService(s); - - // Check it was overridden - ServiceEntity newService = serviceManager.getService("arnold", s.showId); - assertEquals(s, newService); - assertEquals(400, newService.minCores); - assertEquals(10, newService.timeout); - assertEquals(10, newService.timeout_llu); - assertEquals(CueUtil.GB8, newService.minMemory); - assertEquals(CueUtil.GB2, newService.minGpuMemory); - assertFalse(newService.threadable); - assertTrue(s.tags.contains("general")); - - serviceManager.deleteService(s); - - // now check the original is back. - newService = serviceManager.getService("arnold", s.showId); - assertEquals(100, newService.minCores); - assertEquals(0, newService.minGpuMemory); - } - - @Test - @Transactional - @Rollback(true) - public void testJobLaunch() { - - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); - jobLauncher.launch(spec); - - ServiceEntity shell = serviceManager.getService("shell"); - ServiceEntity prman = serviceManager.getService("prman"); - ServiceEntity cuda = serviceManager.getService("cuda"); - LayerDetail shellLayer = - layerDao.getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(0).layerDetail.id); - LayerDetail prmanLayer = - layerDao.getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(1).layerDetail.id); - LayerDetail cudaLayer = - layerDao.getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(3).layerDetail.id); - - assertEquals(shell.minCores, shellLayer.minimumCores); - assertEquals(shell.minMemory, shellLayer.minimumMemory); - assertEquals(shell.minGpuMemory, shellLayer.minimumGpuMemory); - assertFalse(shellLayer.isThreadable); - assertEquals(shell.tags, shellLayer.tags); - assertThat(shellLayer.services, contains("shell", "katana", "unknown")); - - assertEquals(prman.minCores, prmanLayer.minimumCores); - assertEquals(prman.minMemory, prmanLayer.minimumMemory); - assertFalse(prmanLayer.isThreadable); - assertEquals(prman.tags, prmanLayer.tags); - assertThat(prmanLayer.services, contains("prman", "katana")); - - assertEquals(cuda.minCores, cudaLayer.minimumCores); - assertEquals(cuda.minMemory, cudaLayer.minimumMemory); - assertEquals(cuda.minGpuMemory, cudaLayer.minimumGpuMemory); - assertFalse(cudaLayer.isThreadable); - assertEquals(cuda.tags, cudaLayer.tags); - assertThat(cudaLayer.services, contains("cuda")); 
- } - - @Test - @Transactional - @Rollback(true) - public void testManualOverrideThreading() { - - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); - jobLauncher.launch(spec); - - assertFalse( - layerDao.findLayerDetail(spec.getJobs().get(0).detail, "arnold_layer").isThreadable); - } + @Resource + ServiceManager serviceManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + LayerDao layerDao; + + @Before + public void setTestMode() { + jobLauncher.testMode = true; + } + + @Test + @Transactional + @Rollback(true) + public void testGetDefaultService() { + ServiceEntity srv1 = serviceManager.getService("default"); + ServiceEntity srv2 = serviceManager.getDefaultService(); + + assertEquals(srv1, srv2); + } + + @Test + @Transactional + @Rollback(true) + public void testCreateService() { + ServiceEntity s = new ServiceEntity(); + s.name = "dillweed"; + s.minCores = 100; + s.minMemory = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB2; + s.threadable = false; + s.timeout = 0; + s.timeout_llu = 0; + s.tags.addAll(Sets.newHashSet("general")); + serviceManager.createService(s); + + ServiceEntity newService = serviceManager.getService(s.id); + assertEquals(s, newService); + } + + @Test + @Transactional + @Rollback(true) + public void testOverrideExistingService() { + ServiceOverrideEntity s = new ServiceOverrideEntity(); + s.name = "arnold"; + s.minCores = 400; + s.timeout = 10; + s.timeout_llu = 10; + s.minMemory = CueUtil.GB8; + s.minGpuMemory = CueUtil.GB2; + s.threadable = false; + s.tags.addAll(Sets.newHashSet("general")); + s.showId = "00000000-0000-0000-0000-000000000000"; + serviceManager.createService(s); + + // Check it was overridden + ServiceEntity newService = serviceManager.getService("arnold", s.showId); + assertEquals(s, newService); + assertEquals(400, newService.minCores); + assertEquals(10, newService.timeout); + assertEquals(10, newService.timeout_llu); + assertEquals(CueUtil.GB8, newService.minMemory); + assertEquals(CueUtil.GB2, newService.minGpuMemory); + assertFalse(newService.threadable); + assertTrue(s.tags.contains("general")); + + serviceManager.deleteService(s); + + // now check the original is back. 
+ newService = serviceManager.getService("arnold", s.showId); + assertEquals(100, newService.minCores); + assertEquals(0, newService.minGpuMemory); + } + + @Test + @Transactional + @Rollback(true) + public void testJobLaunch() { + + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); + jobLauncher.launch(spec); + + ServiceEntity shell = serviceManager.getService("shell"); + ServiceEntity prman = serviceManager.getService("prman"); + ServiceEntity cuda = serviceManager.getService("cuda"); + LayerDetail shellLayer = layerDao + .getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(0).layerDetail.id); + LayerDetail prmanLayer = layerDao + .getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(1).layerDetail.id); + LayerDetail cudaLayer = layerDao + .getLayerDetail(spec.getJobs().get(0).getBuildableLayers().get(3).layerDetail.id); + + assertEquals(shell.minCores, shellLayer.minimumCores); + assertEquals(shell.minMemory, shellLayer.minimumMemory); + assertEquals(shell.minGpuMemory, shellLayer.minimumGpuMemory); + assertFalse(shellLayer.isThreadable); + assertEquals(shell.tags, shellLayer.tags); + assertThat(shellLayer.services, contains("shell", "katana", "unknown")); + + assertEquals(prman.minCores, prmanLayer.minimumCores); + assertEquals(prman.minMemory, prmanLayer.minimumMemory); + assertFalse(prmanLayer.isThreadable); + assertEquals(prman.tags, prmanLayer.tags); + assertThat(prmanLayer.services, contains("prman", "katana")); + + assertEquals(cuda.minCores, cudaLayer.minimumCores); + assertEquals(cuda.minMemory, cudaLayer.minimumMemory); + assertEquals(cuda.minGpuMemory, cudaLayer.minimumGpuMemory); + assertFalse(cudaLayer.isThreadable); + assertEquals(cuda.tags, cudaLayer.tags); + assertThat(cudaLayer.services, contains("cuda")); + } + + @Test + @Transactional + @Rollback(true) + public void testManualOverrideThreading() { + + JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/services.xml")); + jobLauncher.launch(spec); + + assertFalse(layerDao.findLayerDetail(spec.getJobs().get(0).detail, + "arnold_layer").isThreadable); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java index 2c3a9e694..ef181bf6d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/WhiteboardTests.java @@ -37,25 +37,25 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class WhiteboardTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - Whiteboard whiteboard; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobPending() { - JobDetail job = launchJob(); - assertTrue(whiteboard.isJobPending(job.getName())); - } + @Resource + Whiteboard whiteboard; + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + public JobDetail launchJob() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); + return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); + } + + @Test + @Transactional + @Rollback(true) + public void 
testIsJobPending() { + JobDetail job = launchJob(); + assertTrue(whiteboard.isJobPending(job.getName())); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java index 5db5fa7f7..59ddfe104 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSaturationTests.java @@ -25,28 +25,28 @@ public class CoreSaturationTests extends TestCase { - DispatchHost host; - - @Before - public void setUp() throws Exception { - host = new DispatchHost(); - host.isNimby = false; - } - - public void testCoreAndMemorySaturation1() { - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 700; - - DispatchFrame frame = new DispatchFrame(); - frame.services = "NOTarnold"; - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB * 7); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(700, proc.coresReserved); - assertEquals(CueUtil.GB * 7, proc.memoryReserved); - } + DispatchHost host; + + @Before + public void setUp() throws Exception { + host = new DispatchHost(); + host.isNimby = false; + } + + public void testCoreAndMemorySaturation1() { + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 700; + + DispatchFrame frame = new DispatchFrame(); + frame.services = "NOTarnold"; + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB * 7); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(700, proc.coresReserved); + assertEquals(CueUtil.GB * 7, proc.memoryReserved); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java index ab634fda2..24e6dafc5 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CoreSpanTests.java @@ -26,124 +26,124 @@ public class CoreSpanTests extends TestCase { - DispatchHost host; - - @Before - public void setUp() throws Exception { - host = new DispatchHost(); - host.isNimby = false; - } - - /** - * The coreSpan calculation finds out how many cores a frames's requested memory covers and gives - * more cores when the requested memory spans more than 1 core. 
- */ - public void testCoreSpan() { - - /* 8 gigs and 7 cores idle, request 7g */ - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 700; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB * 7); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(700, proc.coresReserved); - } - - public void testCoreSpanTest1() { - - /* 4 gigs and 1 cores idle, request 1g */ - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB4; - host.cores = 800; - host.idleCores = 100; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB); - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(100, proc.coresReserved); - } - - public void testCoreSpanTest2() { - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB4; - host.cores = 800; - host.idleCores = 200; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB4); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(200, proc.coresReserved); - } - - public void testCoreSpanTest3() { - host.memory = CueUtil.GB8; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 780; - // Hardcoded value of dispatcher.memory.mem_reserved_default - // to avoid having to read opencue.properties on a test setting - long memReservedDefault = 3355443; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(memReservedDefault); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(300, proc.coresReserved); - } - - public void testCoreSpanTest4() { - host.memory = CueUtil.GB32; - host.idleMemory = CueUtil.GB16; - host.cores = 800; - host.idleCores = 200; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(CueUtil.GB * 8); - frame.threadable = true; - - VirtualProc proc = VirtualProc.build(host, frame); - assertEquals(200, proc.coresReserved); - - } - - public void testBuildVirtualProc() { - VirtualProc proc; - - DispatchHost host = new DispatchHost(); - host.threadMode = ThreadMode.ALL_VALUE; - /* 8 gigs and 7 cores idle, request 7g */ - host.memory = CueUtil.GB8; - host.idleMemory = CueUtil.GB8; - host.cores = 800; - host.idleCores = 800; - // Hardcoded value of dispatcher.memory.mem_reserved_default - // to avoid having to read opencue.properties on a test setting - long memReservedDefault = 3355443; - - DispatchFrame frame = new DispatchFrame(); - frame.minCores = 100; - frame.setMinMemory(memReservedDefault); - frame.threadable = true; - - proc = VirtualProc.build(host, frame); - assertEquals(800, proc.coresReserved); - - host.threadMode = ThreadMode.AUTO_VALUE; - proc = VirtualProc.build(host, frame); - assertEquals(300, proc.coresReserved); - } + DispatchHost host; + + @Before + public void setUp() throws Exception { + host = new DispatchHost(); + host.isNimby = false; + } + + /** + * The coreSpan calculation finds out how many cores a frames's requested memory covers and + * gives more cores when the requested memory spans more than 1 core. 
+ */ + public void testCoreSpan() { + + /* 8 gigs and 7 cores idle, request 7g */ + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 700; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB * 7); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(700, proc.coresReserved); + } + + public void testCoreSpanTest1() { + + /* 4 gigs and 1 cores idle, request 1g */ + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB4; + host.cores = 800; + host.idleCores = 100; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB); + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(100, proc.coresReserved); + } + + public void testCoreSpanTest2() { + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB4; + host.cores = 800; + host.idleCores = 200; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB4); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(200, proc.coresReserved); + } + + public void testCoreSpanTest3() { + host.memory = CueUtil.GB8; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 780; + // Hardcoded value of dispatcher.memory.mem_reserved_default + // to avoid having to read opencue.properties on a test setting + long memReservedDefault = 3355443; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(memReservedDefault); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(300, proc.coresReserved); + } + + public void testCoreSpanTest4() { + host.memory = CueUtil.GB32; + host.idleMemory = CueUtil.GB16; + host.cores = 800; + host.idleCores = 200; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(CueUtil.GB * 8); + frame.threadable = true; + + VirtualProc proc = VirtualProc.build(host, frame); + assertEquals(200, proc.coresReserved); + + } + + public void testBuildVirtualProc() { + VirtualProc proc; + + DispatchHost host = new DispatchHost(); + host.threadMode = ThreadMode.ALL_VALUE; + /* 8 gigs and 7 cores idle, request 7g */ + host.memory = CueUtil.GB8; + host.idleMemory = CueUtil.GB8; + host.cores = 800; + host.idleCores = 800; + // Hardcoded value of dispatcher.memory.mem_reserved_default + // to avoid having to read opencue.properties on a test setting + long memReservedDefault = 3355443; + + DispatchFrame frame = new DispatchFrame(); + frame.minCores = 100; + frame.setMinMemory(memReservedDefault); + frame.threadable = true; + + proc = VirtualProc.build(host, frame); + assertEquals(800, proc.coresReserved); + + host.threadMode = ThreadMode.AUTO_VALUE; + proc = VirtualProc.build(host, frame); + assertEquals(300, proc.coresReserved); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java index 348e514e1..2743071bc 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java @@ -25,129 +25,130 @@ public class CueUtilTester extends TestCase { - @Test - public void testFindChunk() { + @Test + public void testFindChunk() { - List dependOnFrameSet = CueUtil.normalizeFrameRange("101-160x10", 1); - List dependErFrameSet = 
CueUtil.normalizeFrameRange("101-160", 1); + List dependOnFrameSet = CueUtil.normalizeFrameRange("101-160x10", 1); + List dependErFrameSet = CueUtil.normalizeFrameRange("101-160", 1); - Integer[] results = new Integer[] {101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 111, 111, - 111, 111, 111, 111, 111, 111, 111, 111, 121, 121, 121, 121, 121, 121, 121, 121, 121, 121, - 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 141, 141, 141, 141, 141, 141, 141, 141, - 141, 141, 151, 151, 151, 151, 151, 151, 151, 151, 151, 151}; + Integer[] results = new Integer[] {101, 101, 101, 101, 101, 101, 101, 101, 101, 101, 111, + 111, 111, 111, 111, 111, 111, 111, 111, 111, 121, 121, 121, 121, 121, 121, 121, 121, + 121, 121, 131, 131, 131, 131, 131, 131, 131, 131, 131, 131, 141, 141, 141, 141, 141, + 141, 141, 141, 141, 141, 151, 151, 151, 151, 151, 151, 151, 151, 151, 151}; - for (int dependErFrameSetIdx = 0; dependErFrameSetIdx < dependErFrameSet - .size(); dependErFrameSetIdx = dependErFrameSetIdx + 1) { + for (int dependErFrameSetIdx = 0; dependErFrameSetIdx < dependErFrameSet + .size(); dependErFrameSetIdx = dependErFrameSetIdx + 1) { - int result = CueUtil.findChunk(dependOnFrameSet, dependErFrameSet.get(dependErFrameSetIdx)); - assertEquals((int) results[dependErFrameSetIdx], result); + int result = + CueUtil.findChunk(dependOnFrameSet, dependErFrameSet.get(dependErFrameSetIdx)); + assertEquals((int) results[dependErFrameSetIdx], result); + } } - } - @Test - public void testFindChunkStaggered() { + @Test + public void testFindChunkStaggered() { - List dependOnFrameSet = CueUtil.normalizeFrameRange("101-110:2", 1); - List dependErFrameSet = CueUtil.normalizeFrameRange("101-110", 1); + List dependOnFrameSet = CueUtil.normalizeFrameRange("101-110:2", 1); + List dependErFrameSet = CueUtil.normalizeFrameRange("101-110", 1); - Integer[] results = new Integer[] {101, 102, 103, 104, 105, 106, 107, 108, 109, 110}; - for (int i = 0; i < dependErFrameSet.size(); i = i + 1) { - int result = CueUtil.findChunk(dependOnFrameSet, dependErFrameSet.get(i)); - assertEquals((int) results[i], result); + Integer[] results = new Integer[] {101, 102, 103, 104, 105, 106, 107, 108, 109, 110}; + for (int i = 0; i < dependErFrameSet.size(); i = i + 1) { + int result = CueUtil.findChunk(dependOnFrameSet, dependErFrameSet.get(i)); + assertEquals((int) results[i], result); + } } - } - /** - * Removes all duplicates from the frame range, applies the chunk size, and maintains dispatch - * order. - */ - public void testNormalizeFrameRange() { - - /* - * An array of frame numbers which is the known result - */ - int[] knownResult; - - /* - * An array of frames returned from normalizeFrameRange + /** + * Removes all duplicates from the frame range, applies the chunk size, and maintains dispatch + * order. */ - List frames; + public void testNormalizeFrameRange() { + + /* + * An array of frame numbers which is the known result + */ + int[] knownResult; + + /* + * An array of frames returned from normalizeFrameRange + */ + List frames; + + /* + * Normal every day frame range. 
+ */ + knownResult = new int[] {1, 2, 3, 4, 5}; + frames = CueUtil.normalizeFrameRange("1-5", 1); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); + } + + /* + * Frame range with chunking + */ + knownResult = new int[] {1, 5, 9}; + frames = CueUtil.normalizeFrameRange("1-10", 4); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); + } + + /* + * Frame range with duplicates... + */ + knownResult = new int[] {1, 3, 5, 7, 9, 2, 4, 6, 8, 10}; + frames = CueUtil.normalizeFrameRange("1-10x2,1-10", 1); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); + } + + /* + * Frame range with duplicates..with chunking! + */ + knownResult = new int[] {1, 5, 9, 4, 8}; + frames = CueUtil.normalizeFrameRange("1-10x2,1-10", 2); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); + } + + /* + * Frame range with no duplicates..with chunking! + */ + knownResult = new int[] {1, 5, 9, 4, 8}; + frames = CueUtil.normalizeFrameRange("1-10:2", 2); + for (int i = 0; i < frames.size(); i++) { + assertEquals(knownResult[i], (int) frames.get(i)); + } + } - /* - * Normal every day frame range. - */ - knownResult = new int[] {1, 2, 3, 4, 5}; - frames = CueUtil.normalizeFrameRange("1-5", 1); - for (int i = 0; i < frames.size(); i++) { - assertEquals(knownResult[i], (int) frames.get(i)); + @Test + public void testProcsToCores() { + assertEquals(200, Convert.coresToCoreUnits(2.0f)); + assertEquals(235, Convert.coresToCoreUnits(2.35f)); + assertEquals(299, Convert.coresToCoreUnits(2.999f)); } - /* - * Frame range with chunking - */ - knownResult = new int[] {1, 5, 9}; - frames = CueUtil.normalizeFrameRange("1-10", 4); - for (int i = 0; i < frames.size(); i++) { - assertEquals(knownResult[i], (int) frames.get(i)); + @Test + public void testCoreUnitsToCores() { + assertEquals(1.0f, Convert.coreUnitsToCores(100), 0.0001f); } - /* - * Frame range with duplicates... - */ - knownResult = new int[] {1, 3, 5, 7, 9, 2, 4, 6, 8, 10}; - frames = CueUtil.normalizeFrameRange("1-10x2,1-10", 1); - for (int i = 0; i < frames.size(); i++) { - assertEquals(knownResult[i], (int) frames.get(i)); + @Test + public void testCoreUnitsToCoresWithScale() { + assertEquals(100, Convert.coresToWholeCoreUnits(1.132132f)); + assertEquals(19900, Convert.coresToWholeCoreUnits(199.232f)); } - /* - * Frame range with duplicates..with chunking! - */ - knownResult = new int[] {1, 5, 9, 4, 8}; - frames = CueUtil.normalizeFrameRange("1-10x2,1-10", 2); - for (int i = 0; i < frames.size(); i++) { - assertEquals(knownResult[i], (int) frames.get(i)); + @Test + public void testBuildProcName() { + assertEquals("drack100/1.00/1", CueUtil.buildProcName("drack100", 100, 1)); + assertEquals("drack100/1.40/0", CueUtil.buildProcName("drack100", 140, 0)); + assertEquals("drack100/2.01/2", CueUtil.buildProcName("drack100", 201, 2)); } - /* - * Frame range with no duplicates..with chunking! 
- */ - knownResult = new int[] {1, 5, 9, 4, 8}; - frames = CueUtil.normalizeFrameRange("1-10:2", 2); - for (int i = 0; i < frames.size(); i++) { - assertEquals(knownResult[i], (int) frames.get(i)); + @Test + public void testCoreUnitsToWholecores() { + float cores = Convert.coreUnitsToWholeCores(149); + assertEquals(1.0f, cores); + } - } - - @Test - public void testProcsToCores() { - assertEquals(200, Convert.coresToCoreUnits(2.0f)); - assertEquals(235, Convert.coresToCoreUnits(2.35f)); - assertEquals(299, Convert.coresToCoreUnits(2.999f)); - } - - @Test - public void testCoreUnitsToCores() { - assertEquals(1.0f, Convert.coreUnitsToCores(100), 0.0001f); - } - - @Test - public void testCoreUnitsToCoresWithScale() { - assertEquals(100, Convert.coresToWholeCoreUnits(1.132132f)); - assertEquals(19900, Convert.coresToWholeCoreUnits(199.232f)); - } - - @Test - public void testBuildProcName() { - assertEquals("drack100/1.00/1", CueUtil.buildProcName("drack100", 100, 1)); - assertEquals("drack100/1.40/0", CueUtil.buildProcName("drack100", 140, 0)); - assertEquals("drack100/2.01/2", CueUtil.buildProcName("drack100", 201, 2)); - } - - @Test - public void testCoreUnitsToWholecores() { - float cores = Convert.coreUnitsToWholeCores(149); - assertEquals(1.0f, cores); - - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java index 279c3a664..aa9cfcc07 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java @@ -8,185 +8,185 @@ import static org.junit.Assert.fail; public class FrameRangeTests { - @Test - public void testSingleFrame() { - Integer frame = 4927; + @Test + public void testSingleFrame() { + Integer frame = 4927; - FrameRange result = new FrameRange(frame.toString()); + FrameRange result = new FrameRange(frame.toString()); - assertThat(result.getAll()).containsExactly(frame); - } - - @Test - public void testNegativeSingleFrame() { - Integer frame = -4982; - - FrameRange result = new FrameRange(frame.toString()); - - assertThat(result.getAll()).containsExactly(frame); - } - - @Test - public void testFrameRange() { - FrameRange result = new FrameRange("1-7"); - - assertThat(result.getAll()).containsExactly(1, 2, 3, 4, 5, 6, 7); - } + assertThat(result.getAll()).containsExactly(frame); + } - @Test - public void testNegativeFrameRange() { - FrameRange result = new FrameRange("-20--13"); + @Test + public void testNegativeSingleFrame() { + Integer frame = -4982; - assertThat(result.getAll()).containsExactly(-20, -19, -18, -17, -16, -15, -14, -13); - } + FrameRange result = new FrameRange(frame.toString()); - @Test - public void testNegativeToPositiveFrameRange() { - FrameRange result = new FrameRange("-5-3"); + assertThat(result.getAll()).containsExactly(frame); + } - assertThat(result.getAll()).containsExactly(-5, -4, -3, -2, -1, 0, 1, 2, 3); - } + @Test + public void testFrameRange() { + FrameRange result = new FrameRange("1-7"); - @Test - public void testReverseFrameRange() { - FrameRange result = new FrameRange("6-2"); + assertThat(result.getAll()).containsExactly(1, 2, 3, 4, 5, 6, 7); + } - assertThat(result.getAll()).containsExactly(6, 5, 4, 3, 2); - } + @Test + public void testNegativeFrameRange() { + FrameRange result = new FrameRange("-20--13"); - @Test - public void testReverseNegativeFrameRange() { - FrameRange result = new FrameRange("-2--6"); + 
assertThat(result.getAll()).containsExactly(-20, -19, -18, -17, -16, -15, -14, -13); + } - assertThat(result.getAll()).containsExactly(-2, -3, -4, -5, -6); - } + @Test + public void testNegativeToPositiveFrameRange() { + FrameRange result = new FrameRange("-5-3"); - @Test - public void testStep() { - FrameRange result = new FrameRange("1-8x2"); + assertThat(result.getAll()).containsExactly(-5, -4, -3, -2, -1, 0, 1, 2, 3); + } - assertThat(result.getAll()).containsExactly(1, 3, 5, 7); - } + @Test + public void testReverseFrameRange() { + FrameRange result = new FrameRange("6-2"); - @Test - public void testNegativeStep() { - FrameRange result = new FrameRange("8-1x-2"); + assertThat(result.getAll()).containsExactly(6, 5, 4, 3, 2); + } - assertThat(result.getAll()).containsExactly(8, 6, 4, 2); - } + @Test + public void testReverseNegativeFrameRange() { + FrameRange result = new FrameRange("-2--6"); - @Test - public void testNegativeStepInvalidRange() { - try { - new FrameRange("1-8x-2"); - fail("negative frame step should have been rejected"); - } catch (IllegalArgumentException e) { - // pass + assertThat(result.getAll()).containsExactly(-2, -3, -4, -5, -6); } - } - @Test - public void testInvertedStep() { - FrameRange result = new FrameRange("1-8y2"); + @Test + public void testStep() { + FrameRange result = new FrameRange("1-8x2"); - assertThat(result.getAll()).containsExactly(2, 4, 6, 8); - } + assertThat(result.getAll()).containsExactly(1, 3, 5, 7); + } - @Test - public void testNegativeInvertedStep() { - FrameRange result = new FrameRange("8-1y-2"); + @Test + public void testNegativeStep() { + FrameRange result = new FrameRange("8-1x-2"); - assertThat(result.getAll()).containsExactly(7, 5, 3, 1); - } + assertThat(result.getAll()).containsExactly(8, 6, 4, 2); + } - @Test - public void testInterleave() { - FrameRange result = new FrameRange("1-10:5"); + @Test + public void testNegativeStepInvalidRange() { + try { + new FrameRange("1-8x-2"); + fail("negative frame step should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + } - assertThat(result.getAll()).containsExactly(1, 6, 3, 5, 7, 9, 2, 4, 8, 10); - } + @Test + public void testInvertedStep() { + FrameRange result = new FrameRange("1-8y2"); - @Test - public void testNegativeInterleave() { - FrameRange result = new FrameRange("10-1:-5"); + assertThat(result.getAll()).containsExactly(2, 4, 6, 8); + } - assertThat(result.getAll()).containsExactly(10, 5, 8, 6, 4, 2, 9, 7, 3, 1); - } + @Test + public void testNegativeInvertedStep() { + FrameRange result = new FrameRange("8-1y-2"); - @Test - public void testNonNumericalInput() { - try { - new FrameRange("a"); - fail("non-numerical frame should have been rejected"); - } catch (IllegalArgumentException e) { - // pass + assertThat(result.getAll()).containsExactly(7, 5, 3, 1); } - try { - new FrameRange("a-b"); - fail("non-numerical frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } + @Test + public void testInterleave() { + FrameRange result = new FrameRange("1-10:5"); - try { - new FrameRange("1-5xc"); - fail("non-numerical step size should have been rejected"); - } catch (IllegalArgumentException e) { - // pass + assertThat(result.getAll()).containsExactly(1, 6, 3, 5, 7, 9, 2, 4, 8, 10); } - try { - new FrameRange("1-5:c"); - fail("non-numerical interleave size should have been rejected"); - } catch (IllegalArgumentException e) { - // pass - } - } + @Test + public void testNegativeInterleave() { + FrameRange result = 
new FrameRange("10-1:-5"); - @Test - public void testInvalidRange() { - try { - new FrameRange("1-10-20"); - fail("invalid frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass + assertThat(result.getAll()).containsExactly(10, 5, 8, 6, 4, 2, 9, 7, 3, 1); } - try { - new FrameRange("1x10-20"); - fail("invalid frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass + @Test + public void testNonNumericalInput() { + try { + new FrameRange("a"); + fail("non-numerical frame should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + + try { + new FrameRange("a-b"); + fail("non-numerical frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + + try { + new FrameRange("1-5xc"); + fail("non-numerical step size should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + + try { + new FrameRange("1-5:c"); + fail("non-numerical interleave size should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } } - try { - new FrameRange("1:10-20"); - fail("invalid frame range should have been rejected"); - } catch (IllegalArgumentException e) { - // pass + @Test + public void testInvalidRange() { + try { + new FrameRange("1-10-20"); + fail("invalid frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + + try { + new FrameRange("1x10-20"); + fail("invalid frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } + + try { + new FrameRange("1:10-20"); + fail("invalid frame range should have been rejected"); + } catch (IllegalArgumentException e) { + // pass + } } - } - @Test - public void testSize() { - FrameRange result = new FrameRange("1-7"); + @Test + public void testSize() { + FrameRange result = new FrameRange("1-7"); - assertEquals(7, result.size()); - } + assertEquals(7, result.size()); + } - @Test - public void testGet() { - FrameRange result = new FrameRange("1-7"); + @Test + public void testGet() { + FrameRange result = new FrameRange("1-7"); - assertEquals(5, result.get(4)); - } + assertEquals(5, result.get(4)); + } - @Test - public void testIndex() { - FrameRange result = new FrameRange("1-7"); + @Test + public void testIndex() { + FrameRange result = new FrameRange("1-7"); - assertEquals(5, result.index(6)); - assertEquals(-1, result.index(22)); - } + assertEquals(5, result.index(6)); + assertEquals(-1, result.index(22)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java index 3c194a445..2e7dfe0df 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java @@ -7,100 +7,100 @@ import static org.junit.Assert.assertEquals; public class FrameSetTests { - @Test - public void shouldSplitListAndMaintainOrder() { - FrameSet result = new FrameSet("57,1-3,4-2,12-15x2,76-70x-3,5-12y3,1-7:5"); + @Test + public void shouldSplitListAndMaintainOrder() { + FrameSet result = new FrameSet("57,1-3,4-2,12-15x2,76-70x-3,5-12y3,1-7:5"); - assertThat(result.getAll()).containsExactly(57, 1, 2, 3, 4, 3, 2, 12, 14, 76, 73, 70, 6, 7, 9, - 10, 12, 1, 6, 3, 5, 7, 2, 4); - } + assertThat(result.getAll()).containsExactly(57, 1, 2, 3, 4, 3, 2, 12, 14, 76, 73, 70, 6, 7, + 9, 10, 12, 1, 6, 3, 5, 7, 2, 4); + } - @Test - public void 
shouldReturnCorrectSize() { - FrameSet result = new FrameSet("1-7"); + @Test + public void shouldReturnCorrectSize() { + FrameSet result = new FrameSet("1-7"); - assertEquals(7, result.size()); - } + assertEquals(7, result.size()); + } - @Test - public void shouldReturnSingleFrame() { - FrameSet result = new FrameSet("1-7"); + @Test + public void shouldReturnSingleFrame() { + FrameSet result = new FrameSet("1-7"); - assertEquals(5, result.get(4)); - } + assertEquals(5, result.get(4)); + } - @Test - public void shouldReturnCorrectIndexes() { - FrameSet result = new FrameSet("1-7"); + @Test + public void shouldReturnCorrectIndexes() { + FrameSet result = new FrameSet("1-7"); - assertEquals(5, result.index(6)); - assertEquals(-1, result.index(22)); - } + assertEquals(5, result.index(6)); + assertEquals(-1, result.index(22)); + } - @Test - public void shouldReconstructSteppedRange() { - FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); + @Test + public void shouldReconstructSteppedRange() { + FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); - // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, - // 108}; + // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, + // 108}; - assertEquals("11-91x20", result.getChunk(5, 5)); - } + assertEquals("11-91x20", result.getChunk(5, 5)); + } - @Test - public void shouldCreateNewSteppedRangeAndNextFrame() { - FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); + @Test + public void shouldCreateNewSteppedRangeAndNextFrame() { + FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); - // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, - // 108}; + // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, + // 108}; - assertEquals("5-11x2,31", result.getChunk(2, 5)); - } + assertEquals("5-11x2,31", result.getChunk(2, 5)); + } - @Test - public void shouldReturnCommaSeparatedList() { - FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); + @Test + public void shouldReturnCommaSeparatedList() { + FrameSet result = new FrameSet("1-10x2,11-100x20,103-108"); - // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, - // 108}; + // int[] intArray = {1, 3, 5, 7, 9, 11, 31, 51, 71, 91, 103, 104, 105, 106, 107, + // 108}; - assertEquals("91,103,104", result.getChunk(9, 3)); - } + assertEquals("91,103,104", result.getChunk(9, 3)); + } - @Test - public void shouldReturnSubsetOfSteppedRange() { - FrameSet result = new FrameSet("1-100x3"); + @Test + public void shouldReturnSubsetOfSteppedRange() { + FrameSet result = new FrameSet("1-100x3"); - assertEquals("28-34x3", result.getChunk(9, 3)); - } + assertEquals("28-34x3", result.getChunk(9, 3)); + } - @Test - public void shouldReturnSubsetOfRange() { - FrameSet result = new FrameSet("1-100"); + @Test + public void shouldReturnSubsetOfRange() { + FrameSet result = new FrameSet("1-100"); - assertEquals("10-12", result.getChunk(9, 3)); - } + assertEquals("10-12", result.getChunk(9, 3)); + } - @Test - public void shouldStopBeforeTheEndOfTheRange() { - FrameSet result = new FrameSet("55-60"); + @Test + public void shouldStopBeforeTheEndOfTheRange() { + FrameSet result = new FrameSet("55-60"); - assertEquals("55-60", result.getChunk(0, 10)); - } + assertEquals("55-60", result.getChunk(0, 10)); + } - @Test - public void shouldReturnLastFrame() { - FrameSet result1 = new FrameSet("1-10x2"); + @Test + public void shouldReturnLastFrame() { + FrameSet result1 = new 
FrameSet("1-10x2"); - FrameSet chunk1 = new FrameSet(result1.getChunk(0, 3)); - FrameSet chunk2 = new FrameSet(result1.getChunk(3, 3)); + FrameSet chunk1 = new FrameSet(result1.getChunk(0, 3)); + FrameSet chunk2 = new FrameSet(result1.getChunk(3, 3)); - assertEquals(5, chunk1.get(chunk1.size() - 1)); - assertEquals(9, chunk2.get(chunk2.size() - 1)); + assertEquals(5, chunk1.get(chunk1.size() - 1)); + assertEquals(9, chunk2.get(chunk2.size() - 1)); - FrameSet result2 = new FrameSet("1"); - FrameSet chunk3 = new FrameSet(result2.getChunk(0, 3)); + FrameSet result2 = new FrameSet("1"); + FrameSet chunk3 = new FrameSet(result2.getChunk(0, 3)); - assertEquals(1, chunk3.get(chunk3.size() - 1)); - } + assertEquals(1, chunk3.get(chunk3.size() - 1)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java index 158e48b50..3a519fdaf 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java @@ -30,60 +30,62 @@ @ContextConfiguration(classes = TestAppConfig.class, loader = AnnotationConfigContextLoader.class) public class JobLogUtilTests extends AbstractTransactionalJUnit4SpringContextTests { - @Resource - private JobLogUtil jobLogUtil; + @Resource + private JobLogUtil jobLogUtil; - private String logRootDefault; - private String logRootSomeOs; + private String logRootDefault; + private String logRootSomeOs; - @Before - public void setUp() { - // The values should match what's defined in test/resources/opencue.properties. - logRootDefault = "/arbitraryLogDirectory"; - logRootSomeOs = "/arbitrarySomeOsLogDirectory"; - } + @Before + public void setUp() { + // The values should match what's defined in test/resources/opencue.properties. 
+ logRootDefault = "/arbitraryLogDirectory"; + logRootSomeOs = "/arbitrarySomeOsLogDirectory"; + } - @Test - public void testGetJobLogRootDirDefault() { - assertEquals(logRootDefault, jobLogUtil.getJobLogRootDir("someUndefinedOs")); - } + @Test + public void testGetJobLogRootDirDefault() { + assertEquals(logRootDefault, jobLogUtil.getJobLogRootDir("someUndefinedOs")); + } - @Test - public void testGetJobLogRootSomeOs() { - assertEquals(logRootSomeOs, jobLogUtil.getJobLogRootDir("some_os")); - } + @Test + public void testGetJobLogRootSomeOs() { + assertEquals(logRootSomeOs, jobLogUtil.getJobLogRootDir("some_os")); + } - @Test - public void testGetJobLogDirDefault() { - assertEquals(logRootDefault + "/show/shot/logs", - jobLogUtil.getJobLogDir("show", "shot", "someUndefinedOs")); - } + @Test + public void testGetJobLogDirDefault() { + assertEquals(logRootDefault + "/show/shot/logs", + jobLogUtil.getJobLogDir("show", "shot", "someUndefinedOs")); + } - @Test - public void testGetJobLogDirSomeOs() { - assertEquals(logRootSomeOs + "/show/shot/logs", - jobLogUtil.getJobLogDir("show", "shot", "some_os")); - } + @Test + public void testGetJobLogDirSomeOs() { + assertEquals(logRootSomeOs + "/show/shot/logs", + jobLogUtil.getJobLogDir("show", "shot", "some_os")); + } - @Test - public void testGetJobLogPathDefault() { - JobDetail jobDetail = new JobDetail(); - jobDetail.id = "id"; - jobDetail.name = "name"; - jobDetail.showName = "show"; - jobDetail.shot = "shot"; - jobDetail.os = "someUndefinedOs"; - assertEquals(logRootDefault + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); - } + @Test + public void testGetJobLogPathDefault() { + JobDetail jobDetail = new JobDetail(); + jobDetail.id = "id"; + jobDetail.name = "name"; + jobDetail.showName = "show"; + jobDetail.shot = "shot"; + jobDetail.os = "someUndefinedOs"; + assertEquals(logRootDefault + "/show/shot/logs/name--id", + jobLogUtil.getJobLogPath(jobDetail)); + } - @Test - public void testGetJobLogPathSomeOs() { - JobDetail jobDetail = new JobDetail(); - jobDetail.id = "id"; - jobDetail.name = "name"; - jobDetail.showName = "show"; - jobDetail.shot = "shot"; - jobDetail.os = "some_os"; - assertEquals(logRootSomeOs + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); - } + @Test + public void testGetJobLogPathSomeOs() { + JobDetail jobDetail = new JobDetail(); + jobDetail.id = "id"; + jobDetail.name = "name"; + jobDetail.showName = "show"; + jobDetail.shot = "shot"; + jobDetail.os = "some_os"; + assertEquals(logRootSomeOs + "/show/shot/logs/name--id", + jobLogUtil.getJobLogPath(jobDetail)); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java index a219b75e1..e6711bcf5 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/SqlUtilTests.java @@ -7,12 +7,12 @@ public class SqlUtilTests { - @Test - public void testBuildBindVariableArray() { - String colName = "arbitrary-column-name"; + @Test + public void testBuildBindVariableArray() { + String colName = "arbitrary-column-name"; - String queryString = buildBindVariableArray(colName, 6); + String queryString = buildBindVariableArray(colName, 6); - assertEquals(colName + " IN (?,?,?,?,?,?)", queryString); - } + assertEquals(colName + " IN (?,?,?,?,?,?)", queryString); + } } From aa2f514b785f9ed1c6bc71349976c3b3a10c2359 Mon Sep 17 00:00:00 2001 From: Diego Tavares Date: Mon, 6 Jan 
2025 16:54:04 -0800 Subject: [PATCH 3/3] Handle redirect in a single transaction The redirect logic needs to happen in a single transaction to avoid a race condition where the redirect logic would unbook procs to dispatch the reserved job, but the procs would be picked up by another thread booking general jobs. Besides adding a transaction, this change ensures both the unbookProc and dispatchHost calls run on the same thread to avoid escaping the transaction scope. --- .../spcue/dispatcher/RedirectManager.java | 22 +++++-------------- 1 file changed, 5 insertions(+), 17 deletions(-) diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java index 7fe75c151..ed253f19b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java @@ -23,6 +23,8 @@ import org.springframework.beans.factory.annotation.Autowired; import org.springframework.core.env.Environment; +import org.springframework.transaction.annotation.Propagation; +import org.springframework.transaction.annotation.Transactional; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.GroupInterface; @@ -276,10 +278,9 @@ public boolean addRedirect(VirtualProc proc, GroupInterface group, boolean kill, * @param proc * @return */ + @Transactional(propagation = Propagation.REQUIRED) public boolean redirect(VirtualProc proc) { - try { - Redirect r = redirectService.remove(proc.getProcId()); if (r == null) { logger.info("Failed to find redirect for proc " + proc); @@ -309,34 +310,21 @@ public boolean redirect(VirtualProc proc) { switch (r.getType()) { case JOB_REDIRECT: - logger.info("attempting a job redirect to " + r.getDestinationId()); JobInterface job = jobDao.getJob(r.getDestinationId()); logger.info("redirecting proc " + proc + " to job " + job.getName()); - - if (dispatcher.isTestMode()) { - dispatcher.dispatchHost(host, job); - } else { - bookingQueue.execute(new DispatchBookHost(host, job, dispatcher, env)); - } + dispatcher.dispatchHost(host, job); return true; case GROUP_REDIRECT: - logger.info("attempting a group redirect to " + r.getDestinationId()); GroupInterface group = groupDao.getGroup(r.getDestinationId()); logger.info("redirecting group " + proc + " to job " + group.getName()); - - if (dispatcher.isTestMode()) { - dispatcher.dispatchHost(host, group); - } else { - bookingQueue.execute(new DispatchBookHost(host, group, dispatcher, env)); - } + dispatcher.dispatchHost(host, group); return true; default: logger.info("redirect failed, invalid redirect type: " + r.getType()); return false; } - } catch (Exception e) { /* * If anything fails the redirect fails, so just return false after logging.